From 86f68f308e23c944f32ca8bf2259da6dd07d505b Mon Sep 17 00:00:00 2001 From: "rodwyer@stanford.edu" Date: Mon, 16 Sep 2024 18:09:55 -0700 Subject: [PATCH 01/19] First commit of firmware changes --- TrigScint/python/trigScint.py | 36 +++++++++++++++++++++++++++++++++++ scripts/ldmx-env.sh | 2 -- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/TrigScint/python/trigScint.py b/TrigScint/python/trigScint.py index 7d8ccb312..f8fcbec77 100644 --- a/TrigScint/python/trigScint.py +++ b/TrigScint/python/trigScint.py @@ -285,6 +285,42 @@ def __init__(self,name) : trigScintTrack = TrigScintTrackProducer( "trigScintTrack" ) +class TrigScintTrackProducer(ldmxcfg.Producer) : + """Configuration for track producer for Trigger Scintillators""" + + def __init__(self,name) : + super().__init__(name,'trigscint::','TrigScint') + + self.delta_max = 0.75 + self.tracking_threshold = 0. #to add in neighboring channels + self.seeding_collection = "TriggerPad1Clusters" + self.further_input_collections = ["TriggerPad2Clusters","TriggerPad3Clusters"] + self.allow_skip_last_collection = False + self.vertical_bar_start_index = 52 + self.number_horizontal_bars = 24 #16 for x,y segmented geometry only + self.number_vertical_bars = 0 #8 for x,y segmented geometry only + self.horizontal_bar_width = 3. + self.horizontal_bar_gap = 0.3 + self.vertical_bar_width = 3. 
+ self.vertical_bar_gap = 0.3 + self.input_pass_name="" #take any pass + self.output_collection="TriggerPadTracks" + self.verbosity = 0 + +class TrigScintFirmwareTracker(ldmxcfg.Producer) : + """Configuration for the track producer from the Firmware Tracker""" + def __init__(self,name) : + super().__init__(name,'trigscint::TrigScintFirmwareTracker','TrigScint') + self.clustering_threshold=40.0 + self.digis1_collection='trigScintDigisPad1' + self.digis2_collection='trigScintDigisPad2' + self.digis3_collection='trigScintDigisPad3' + self.input_pass_name="" + self.output_collection="TriggerPadTracks" + self.verbosity = 0 + self.time_tolerance = 50.0 + self.pad_time = -1.5 + class QIEAnalyzer(ldmxcfg.Analyzer) : """Configuration for linearized QIE analyzer for Trigger Scintillators""" diff --git a/scripts/ldmx-env.sh b/scripts/ldmx-env.sh index aae7a3f4f..f374afd79 100644 --- a/scripts/ldmx-env.sh +++ b/scripts/ldmx-env.sh @@ -198,7 +198,6 @@ if hash docker &> /dev/null; then -e LDMX_BASE \ -e DISPLAY=${LDMX_CONTAINER_DISPLAY}:0 \ $_envs \ - -v /tmp/.X11-unix:/tmp/.X11-unix \ $_mounts \ -u $(id -u ${USER}):$(id -g ${USER}) \ $LDMX_DOCKER_TAG "$@" @@ -266,7 +265,6 @@ elif hash singularity &> /dev/null; then # Run the container __ldmx_run() { - local csv_list="/tmp/.X11-unix" for dir_to_mount in "${LDMX_CONTAINER_MOUNTS[@]}"; do csv_list="$dir_to_mount,$csv_list" done From 445a64852dfb8f83d93b3709b8f3ab17eaad128d Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 18:27:55 -0700 Subject: [PATCH 02/19] First commit of firmware changes --- TrigScint/exampleConfigs/firmwareEx.py | 175 + TrigScint/exampleConfigs/meganEx.py | 167 + .../TrigScint/TrigScintFirmwareTracker.h | 124 + TrigScint/include/TrigScint/ap_common.h | 376 + TrigScint/include/TrigScint/ap_decl.h | 212 + TrigScint/include/TrigScint/ap_fixed.h | 360 + TrigScint/include/TrigScint/ap_fixed_base.h | 2354 ++++++ TrigScint/include/TrigScint/ap_fixed_ref.h | 718 ++ 
.../include/TrigScint/ap_fixed_special.h | 230 + TrigScint/include/TrigScint/ap_int.h | 330 + TrigScint/include/TrigScint/ap_int_base.h | 1885 +++++ TrigScint/include/TrigScint/ap_int_ref.h | 1346 +++ TrigScint/include/TrigScint/ap_int_special.h | 223 + TrigScint/include/TrigScint/clusterproducer.h | 12 + TrigScint/include/TrigScint/etc/ap_private.h | 7199 +++++++++++++++++ TrigScint/include/TrigScint/objdef.h | 96 + TrigScint/include/TrigScint/testutils.h | 16 + TrigScint/include/TrigScint/trackproducer.h | 12 + .../TrigScint/TrigScintFirmwareTracker.cxx | 290 + .../src/TrigScint/clusterproducer_sw.cxx | 77 + TrigScint/src/TrigScint/trackproducer_hw.cxx | 98 + 21 files changed, 16300 insertions(+) create mode 100644 TrigScint/exampleConfigs/firmwareEx.py create mode 100644 TrigScint/exampleConfigs/meganEx.py create mode 100644 TrigScint/include/TrigScint/TrigScintFirmwareTracker.h create mode 100644 TrigScint/include/TrigScint/ap_common.h create mode 100644 TrigScint/include/TrigScint/ap_decl.h create mode 100644 TrigScint/include/TrigScint/ap_fixed.h create mode 100644 TrigScint/include/TrigScint/ap_fixed_base.h create mode 100644 TrigScint/include/TrigScint/ap_fixed_ref.h create mode 100644 TrigScint/include/TrigScint/ap_fixed_special.h create mode 100644 TrigScint/include/TrigScint/ap_int.h create mode 100644 TrigScint/include/TrigScint/ap_int_base.h create mode 100644 TrigScint/include/TrigScint/ap_int_ref.h create mode 100644 TrigScint/include/TrigScint/ap_int_special.h create mode 100755 TrigScint/include/TrigScint/clusterproducer.h create mode 100644 TrigScint/include/TrigScint/etc/ap_private.h create mode 100755 TrigScint/include/TrigScint/objdef.h create mode 100755 TrigScint/include/TrigScint/testutils.h create mode 100755 TrigScint/include/TrigScint/trackproducer.h create mode 100644 TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx create mode 100755 TrigScint/src/TrigScint/clusterproducer_sw.cxx create mode 100755 
TrigScint/src/TrigScint/trackproducer_hw.cxx diff --git a/TrigScint/exampleConfigs/firmwareEx.py b/TrigScint/exampleConfigs/firmwareEx.py new file mode 100644 index 000000000..a207e7521 --- /dev/null +++ b/TrigScint/exampleConfigs/firmwareEx.py @@ -0,0 +1,175 @@ +#!/bin/python + +import sys +import os +import json + +# we need the ldmx configuration package to construct the object + +from LDMX.Framework import ldmxcfg + +# set a 'pass name' +passName="sim" +p=ldmxcfg.Process(passName) + +#import all processors +from LDMX.SimCore import generators +from LDMX.SimCore import simulator +from LDMX.Biasing import filters + +from LDMX.Detectors.makePath import * +from LDMX.SimCore import simcfg + +#pull in command line options +nEle=4 # simulated beam electrons +runNum=10 +version="ldmx-det-v14" +outputNameString= "ldmxdetv14gap10mm_firmware.root" #sample identifier +outDir= "" #sample identifier + +# +# Instantiate the simulator. +# +sim = simulator.simulator("test") + +# +# Set the path to the detector to use (pulled from job config) +# +sim.setDetector( version, True ) +sim.scoringPlanes = makeScoringPlanesPath(version) + +outname=outputNameString #+".root" +print("NAME = " + outname) + +# +# Set run parameters. These are all pulled from the job config +# +p.run = runNum +p.maxEvents = 100 +nElectrons = nEle +beamEnergy = 4.0; #in GeV + +sim.description = "Inclusive "+str(beamEnergy)+" GeV electron events, "+str(nElectrons)+"e" +#sim.randomSeeds = [ SEED1 , SEED2 ] +sim.beamSpotSmear = [20., 80., 0] + + +mpgGen = generators.multi( "mgpGen" ) # this is the line that actually creates the generator +mpgGen.vertex = [ -44., 0., -880. 
] # mm +mpgGen.nParticles = nElectrons +mpgGen.pdgID = 11 +mpgGen.enablePoisson = False #True + +import math +theta = math.radians(5.45) +beamEnergyMeV=1000*beamEnergy +px = beamEnergyMeV*math.sin(theta) +py = 0.; +pz= beamEnergyMeV*math.cos(theta) +mpgGen.momentum = [ px, py, pz ] + +# +# Set the multiparticle gun as generator +# +sim.generators = [ mpgGen ] + +#reconstruction and vetoes + +#Ecal and Hcal hardwired/geometry stuff +#import LDMX.Ecal.EcalGeometry +import LDMX.Ecal.ecal_hardcoded_conditions +from LDMX.Ecal import EcalGeometry +#egeom = EcalGeometry.EcalGeometryProvider.getInstance() +#Hcal hardwired/geometry stuff +from LDMX.Hcal import HcalGeometry +import LDMX.Hcal.hcal_hardcoded_conditions +#hgeom = HcalGeometry.HcalGeometryProvider.getInstance() + + +from LDMX.Ecal import digi as eDigi +from LDMX.Ecal import vetos +from LDMX.Hcal import digi as hDigi +from LDMX.Hcal import hcal + +from LDMX.Recon.simpleTrigger import TriggerProcessor + +from LDMX.TrigScint.trigScint import TrigScintDigiProducer +from LDMX.TrigScint.trigScint import TrigScintClusterProducer +from LDMX.TrigScint.trigScint import trigScintTrack +from LDMX.TrigScint.trigScint import TrigScintFirmwareTracker + +if "v12" in version : + tsSimColls=[ "TriggerPadTagSimHits", "TriggerPadUpSimHits", "TriggerPadDnSimHits" ] +else : + tsSimColls=[ "TriggerPad2SimHits", "TriggerPad3SimHits", "TriggerPad1SimHits" ] + +# ecal digi chain +# ecalDigi =eDigi.EcalDigiProducer('EcalDigis') +# ecalReco =eDigi.EcalRecProducer('ecalRecon') +# ecalVeto =vetos.EcalVetoProcessor('ecalVetoBDT') + +# #hcal digi chain +# hcalDigi =hDigi.HcalDigiProducer('hcalDigis') +# hcalReco =hDigi.HcalRecProducer('hcalRecon') +# hcalVeto =hcal.HcalVetoProcessor('hcalVeto') +# #hcalDigi.inputCollName="HcalSimHits" +#hcalDigi.inputPassName=passName + +# TS digi + clustering + track chain +tsDigisTag =TrigScintDigiProducer.pad2() +tsDigisTag.input_collection = tsSimColls[0]# +"_"+passName +tsDigisTag.input_pass_name = "sim" 
+tsDigisUp =TrigScintDigiProducer.pad3() +tsDigisUp.input_collection = tsSimColls[1]# +"_"+passName +tsDigisUp.input_pass_name = "sim" +tsDigisDown=TrigScintDigiProducer.pad1() +tsDigisDown.input_collection = tsSimColls[2]# +"_"+passName +tsDigisDown.input_pass_name = "sim" + +tsClustersTag =TrigScintClusterProducer.pad2() +tsClustersUp =TrigScintClusterProducer.pad1() +tsClustersDown =TrigScintClusterProducer.pad3() + +if "v12" in version : + tsClustersTag.pad_time = -2. + tsClustersUp.pad_time = 0. + tsClustersDown.pad_time = 0. + +tsDigisUp.verbosity=0 +tsClustersUp.verbosity=1 +trigScintTrack.verbosity=1 + +trigScintTrack.delta_max = 0.75 + +trigFirm = TrigScintFirmwareTracker( "trigFirm" ) +trigFirm.input_pass_name = "sim" +trigFirm.digis1_collection = "trigScintDigisPad1" +trigFirm.digis2_collection = "trigScintDigisPad2" +trigFirm.digis3_collection = "trigScintDigisPad3" +trigFirm.output_collection = "TriggerPadTracksFirmware" + +from LDMX.Recon.electronCounter import ElectronCounter +eCount = ElectronCounter( nElectrons, "ElectronCounter") # first argument is number of electrons in simulation +eCount.use_simulated_electron_number = False +eCount.input_collection="TriggerPadTracks" +eCount.input_pass_name=passName + +# # p.sequence=[ sim, ecalDigi, ecalReco, ecalVeto, hcalDigi, hcalReco, hcalVeto, tsDigisTag, tsDigisUp, tsDigisDown, tsClustersTag, tsClustersUp, tsClustersDown, trigScintTrack, eCount ] +# #hcal digi keeps crashing in config step +p.sequence=[ sim, tsDigisTag, tsDigisUp, tsDigisDown, tsClustersTag, tsClustersUp, tsClustersDown, trigScintTrack, trigFirm, eCount] +# p.sequence=[sim] + +p.outputFiles=[outname] + +p.termLogLevel = 0 # default is 2 (WARNING); but then logFrequency is ignored. level 1 = INFO. + +#print this many events to stdout (independent on number of events, edge case: round-off effects when not divisible. 
so can go up by a factor 2 or so) +logEvents=20 +if p.maxEvents < logEvents : + logEvents = p.maxEvents +p.logFrequency = int( p.maxEvents/logEvents ) + +json.dumps(p.parameterDump(), indent=2) + +with open('parameterDump.json', 'w') as outfile: + json.dump(p.parameterDump(), outfile, indent=4) diff --git a/TrigScint/exampleConfigs/meganEx.py b/TrigScint/exampleConfigs/meganEx.py new file mode 100644 index 000000000..e7e4bc203 --- /dev/null +++ b/TrigScint/exampleConfigs/meganEx.py @@ -0,0 +1,167 @@ +#!/bin/python + +import sys +import os +import json + +# we need the ldmx configuration package to construct the object + +from LDMX.Framework import ldmxcfg + +# set a 'pass name' +passName="sim" +p=ldmxcfg.Process(passName) + +#import all processors +from LDMX.SimCore import generators +from LDMX.SimCore import simulator +from LDMX.Biasing import filters + +from LDMX.Detectors.makePath import * +from LDMX.SimCore import simcfg + +#pull in command line options +nEle=4 # simulated beam electrons +runNum=10 +version="ldmx-det-v14" +outputNameString= "ldmxdetv14gap10mm.root" #sample identifier +outDir= "" #sample identifier + +# +# Instantiate the simulator. +# +sim = simulator.simulator("test") + +# +# Set the path to the detector to use (pulled from job config) +# +sim.setDetector( version, True ) +sim.scoringPlanes = makeScoringPlanesPath(version) + +outname=outputNameString #+".root" +print("NAME = " + outname) + +# +# Set run parameters. These are all pulled from the job config +# +p.run = runNum +p.maxEvents = 100 +nElectrons = nEle +beamEnergy = 4.0; #in GeV + +sim.description = "Inclusive "+str(beamEnergy)+" GeV electron events, "+str(nElectrons)+"e" +#sim.randomSeeds = [ SEED1 , SEED2 ] +sim.beamSpotSmear = [20., 80., 0] + + +mpgGen = generators.multi( "mgpGen" ) # this is the line that actually creates the generator +mpgGen.vertex = [ -44., 0., -880. 
] # mm +mpgGen.nParticles = nElectrons +mpgGen.pdgID = 11 +mpgGen.enablePoisson = False #True + +import math +theta = math.radians(5.45) +beamEnergyMeV=1000*beamEnergy +px = beamEnergyMeV*math.sin(theta) +py = 0.; +pz= beamEnergyMeV*math.cos(theta) +mpgGen.momentum = [ px, py, pz ] + +# +# Set the multiparticle gun as generator +# +sim.generators = [ mpgGen ] + +#reconstruction and vetoes + +#Ecal and Hcal hardwired/geometry stuff +#import LDMX.Ecal.EcalGeometry +import LDMX.Ecal.ecal_hardcoded_conditions +from LDMX.Ecal import EcalGeometry +#egeom = EcalGeometry.EcalGeometryProvider.getInstance() +#Hcal hardwired/geometry stuff +from LDMX.Hcal import HcalGeometry +import LDMX.Hcal.hcal_hardcoded_conditions +#hgeom = HcalGeometry.HcalGeometryProvider.getInstance() + + +from LDMX.Ecal import digi as eDigi +from LDMX.Ecal import vetos +from LDMX.Hcal import digi as hDigi +from LDMX.Hcal import hcal + +from LDMX.Recon.simpleTrigger import TriggerProcessor + +from LDMX.TrigScint.trigScint import TrigScintDigiProducer +from LDMX.TrigScint.trigScint import TrigScintClusterProducer +from LDMX.TrigScint.trigScint import trigScintTrack + +if "v12" in version : + tsSimColls=[ "TriggerPadTagSimHits", "TriggerPadUpSimHits", "TriggerPadDnSimHits" ] +else : + tsSimColls=[ "TriggerPad2SimHits", "TriggerPad3SimHits", "TriggerPad1SimHits" ] + +# ecal digi chain +# ecalDigi =eDigi.EcalDigiProducer('EcalDigis') +# ecalReco =eDigi.EcalRecProducer('ecalRecon') +# ecalVeto =vetos.EcalVetoProcessor('ecalVetoBDT') + +# #hcal digi chain +# hcalDigi =hDigi.HcalDigiProducer('hcalDigis') +# hcalReco =hDigi.HcalRecProducer('hcalRecon') +# hcalVeto =hcal.HcalVetoProcessor('hcalVeto') +# #hcalDigi.inputCollName="HcalSimHits" +#hcalDigi.inputPassName=passName + +# TS digi + clustering + track chain +tsDigisTag =TrigScintDigiProducer.pad2() +tsDigisTag.input_collection = tsSimColls[0]# +"_"+passName +tsDigisTag.input_pass_name = "sim" +tsDigisUp =TrigScintDigiProducer.pad3() 
+tsDigisUp.input_collection = tsSimColls[1]# +"_"+passName +tsDigisUp.input_pass_name = "sim" +tsDigisDown=TrigScintDigiProducer.pad1() +tsDigisDown.input_collection = tsSimColls[2]# +"_"+passName +tsDigisDown.input_pass_name = "sim" + +tsClustersTag =TrigScintClusterProducer.pad2() +tsClustersUp =TrigScintClusterProducer.pad1() +tsClustersDown =TrigScintClusterProducer.pad3() + +if "v12" in version : + tsClustersTag.pad_time = -2. + tsClustersUp.pad_time = 0. + tsClustersDown.pad_time = 0. + +tsDigisUp.verbosity=0 +tsClustersUp.verbosity=1 +trigScintTrack.verbosity=1 + +trigScintTrack.delta_max = 0.75 + +from LDMX.Recon.electronCounter import ElectronCounter +eCount = ElectronCounter( nElectrons, "ElectronCounter") # first argument is number of electrons in simulation +eCount.use_simulated_electron_number = False +eCount.input_collection="TriggerPadTracks" +eCount.input_pass_name=passName + +# # p.sequence=[ sim, ecalDigi, ecalReco, ecalVeto, hcalDigi, hcalReco, hcalVeto, tsDigisTag, tsDigisUp, tsDigisDown, tsClustersTag, tsClustersUp, tsClustersDown, trigScintTrack, eCount ] +# #hcal digi keeps crashing in config step +p.sequence=[ sim, tsDigisTag, tsDigisUp, tsDigisDown, tsClustersTag, tsClustersUp, tsClustersDown, trigScintTrack, eCount] +# p.sequence=[sim] + +p.outputFiles=[outname] + +p.termLogLevel = 0 # default is 2 (WARNING); but then logFrequency is ignored. level 1 = INFO. + +#print this many events to stdout (independent on number of events, edge case: round-off effects when not divisible. 
so can go up by a factor 2 or so) +logEvents=20 +if p.maxEvents < logEvents : + logEvents = p.maxEvents +p.logFrequency = int( p.maxEvents/logEvents ) + +json.dumps(p.parameterDump(), indent=2) + +with open('parameterDump.json', 'w') as outfile: + json.dump(p.parameterDump(), outfile, indent=4) diff --git a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h new file mode 100644 index 000000000..c06ce0743 --- /dev/null +++ b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h @@ -0,0 +1,124 @@ +/** + * @file TrigScintFirmwareTracker.h + * @brief Clustering of trigger scintillator hits + * @author Lene Kristian Bryngemark, Stanford University + */ + +#ifndef TRIGSCINT_TRIGSCINTFIRMWARETRACKER_H +#define TRIGSCINT_TRIGSCINTFIRMWARETRACKER_H + +// LDMX Framework +#include "Framework/Configure/Parameters.h" // Needed to import parameters from configuration file +#include "Framework/Event.h" +#include "Framework/EventProcessor.h" //Needed to declare processor +#include "Recon/Event/EventConstants.h" +#include "TrigScint/TrigScintFirmwareTracker.h" +#include "TrigScint/Event/TrigScintHit.h" +#include "TrigScint/Event/TrigScintTrack.h" +#include "TrigScint/objdef.h" + +namespace trigscint { + +/** + * @class TrigScintFirmwareTracker + * @brief + */ +class TrigScintFirmwareTracker : public framework::Producer { + public: + TrigScintFirmwareTracker(const std::string& name, framework::Process& process) + : Producer(name, process) {} + + void configure(framework::config::Parameters& ps) override; + + void produce(framework::Event& event) override; + + ldmx::TrigScintTrack makeTrack(Track outTrk); + + /** + * add a hit at index idx to a cluster + */ + + void onProcessStart() override; + + void onProcessEnd() override; + + private: + // collection of clusters produced + std::vector digis1_; + + // collection of clusters produced + std::vector digis2_; + + // collection of clusters produced + std::vector digis3_; + + 
+ + // min threshold for adding a hit to a cluster + double minThr_{0.}; + + // max number of neighboring hits to combine when forming a cluster + int maxWidth_{2}; + + // specific verbosity of this producer + int verbose_{0}; + + // expected arrival time of hits in the pad [ns] + double padTime_{0.}; + + // maximum allowed delay for hits to be considered for clustering + double timeTolerance_{0.}; + + // output collection (clusters) + std::string output_collection_; + + // input collection (hits) + std::string digis1_collection_; + std::string digis2_collection_; + std::string digis3_collection_; + + + std::vector tracks_; + + + // specific pass name to use for track making + std::string passName_{""}; + + // vertical bar start index + int vertBarStartIdx_{52}; + + // cluster channel nb centroid (will be content weighted) + float centroid_{0.}; + + // cluster channel nb horizontal centroid (will be content weighted) + float centroidX_{-1}; + + // cluster channel nb vertical centroid (will be content weighted) + float centroidY_{-1}; + + // energy (edep), PE, or sth + float val_{0.}; + + // edep content, only; leave val_ for PE + float valE_{0.}; + + // book keep which channels have already been added to the cluster at hand + std::vector v_addedIndices_; + + // book keep which channels have already been added to any cluster + std::vector v_usedIndices_; + + // fraction of cluster energy deposition associated with beam electron sim + // hits + float beamE_{0.}; + + // cluster time (energy weighted based on hit time) + float time_{0.}; + + // empty map container + std::map hitChannelMap_; +}; + +} // namespace trigscint + +#endif /* TRIGSCINT_TRIGSCINTCLUSTERPRODUCER_H */ diff --git a/TrigScint/include/TrigScint/ap_common.h b/TrigScint/include/TrigScint/ap_common.h new file mode 100644 index 000000000..994851902 --- /dev/null +++ b/TrigScint/include/TrigScint/ap_common.h @@ -0,0 +1,376 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_COMMON_H__ +#define __AP_COMMON_H__ + +// ---------------------------------------------------------------------- + +// Forward declaration of all AP types. +#include + + +#ifdef __SYNTHESIS__ +#error "The open-source version of AP types does not support synthesis." +#endif // ifdef __SYNTHESIS__ +#define _AP_ENABLE_HALF_ 0 + + +#if _AP_ENABLE_HALF_ == 1 +// Before ap_private definition. +#ifdef __SYNTHESIS__ +#define _HLS_HALF_DEFINED_ +typedef __fp16 half; +#else +class half; +#endif // __SYNTHESIS__ +#endif // _AP_ENABLE_HALF_ + +// ---------------------------------------------------------------------- + +// Macro functions +#define AP_MAX(a, b) ((a) > (b) ? (a) : (b)) +#define AP_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define AP_ABS(a) ((a) >= 0 ? (a) : -(a)) + +#ifndef AP_ASSERT +#ifndef __SYNTHESIS__ +#include +#define AP_ASSERT(cond, msg) assert((cond) && (msg)) +#else +#define AP_ASSERT(cond, msg) +#endif // ifndef __SYNTHESIS__ +#endif // ifndef AP_ASSERT + +#ifndef __SYNTHESIS__ +// for fprintf messages. +#include +// for exit on error. +#include +#endif + +// same disable condition as assert. +#if !defined(__SYNTHESIS__) && !defined(NDEBUG) + +#define _AP_DEBUG(cond, ...) \ + do { \ + if ((cond)) { \ + fprintf(stderr, "DEBUG: " __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } \ + } while (0) +#define _AP_WARNING(cond, ...) 
\ + do { \ + if ((cond)) { \ + fprintf(stderr, "WARNING: " __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } \ + } while (0) +#define _AP_ERROR(cond, ...) \ + do { \ + if ((cond)) { \ + fprintf(stderr, "ERROR: " __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + abort(); \ + } \ + } while (0) + +#else // if !defined(__SYNTHESIS__) && !defined(NDEBUG) + +#define __AP_VOID_CAST static_cast +#define _AP_DEBUG(cond, ...) (__AP_VOID_CAST(0)) +#define _AP_WARNING(cond, ...) (__AP_VOID_CAST(0)) +#define _AP_ERROR(cond, ...) (__AP_VOID_CAST(0)) + +#endif // if !defined(__SYNTHESIS__) && !defined(NDEBUG) else + +// ---------------------------------------------------------------------- + +// Attribute only for synthesis +#ifdef __SYNTHESIS__ +#define INLINE inline __attribute__((always_inline)) +//#define INLINE inline __attribute__((noinline)) +#else +#define INLINE inline +#endif + +#define AP_WEAK +// __attribute__((weak)) + +#ifndef AP_INT_MAX_W +#define AP_INT_MAX_W 1024 +#endif + +#define BIT_WIDTH_UPPER_LIMIT (1 << 15) +#if AP_INT_MAX_W > BIT_WIDTH_UPPER_LIMIT +#error "Bitwidth exceeds 32768 (1 << 15), the maximum allowed value" +#endif + +#define MAX_MODE(BITS) ((BITS + 1023) / 1024) + +// ---------------------------------------------------------------------- + +// XXX apcc cannot handle global std::ios_base::Init() brought in by +#ifndef AP_AUTOCC +#ifndef __SYNTHESIS__ +// for overload operator<< +#include +#endif +#endif // ifndef AP_AUTOCC + +#ifndef __SYNTHESIS__ +// for string format. +#include +// for string. +#include +#endif + +// for detecting if char is signed. +enum { CHAR_IS_SIGNED = (char)-1 < 0 }; + +// TODO we have similar traits in x_hls_utils.h, should consider unify. 
+namespace _ap_type { +template +struct is_signed { + static const bool value = _Tp(-1) < _Tp(1); +}; + +template +struct is_integral { + static const bool value = false; +}; +#define DEF_IS_INTEGRAL(CTYPE) \ + template <> \ + struct is_integral { \ + static const bool value = true; \ + }; +DEF_IS_INTEGRAL(bool) +DEF_IS_INTEGRAL(char) +DEF_IS_INTEGRAL(signed char) +DEF_IS_INTEGRAL(unsigned char) +DEF_IS_INTEGRAL(short) +DEF_IS_INTEGRAL(unsigned short) +DEF_IS_INTEGRAL(int) +DEF_IS_INTEGRAL(unsigned int) +DEF_IS_INTEGRAL(long) +DEF_IS_INTEGRAL(unsigned long) +DEF_IS_INTEGRAL(ap_slong) +DEF_IS_INTEGRAL(ap_ulong) +#undef DEF_IS_INTEGRAL + +template +struct enable_if {}; +// partial specialization for true +template +struct enable_if { + typedef _Tp type; +}; + +template +struct remove_const { + typedef _Tp type; +}; + +template +struct remove_const<_Tp const> { + typedef _Tp type; +}; +} // namespace _ap_type + +// ---------------------------------------------------------------------- + +// Define ssdm_int and _ssdm_op. +// XXX deleted in open-source version + +#ifndef NON_C99STRING +#define _AP_C99 true +#else +#define _AP_C99 false +#endif + +static inline unsigned char guess_radix(const char* s) { + unsigned char rd = 10; ///< default radix + const char* p = s; + // skip neg sign if it exists + if (p[0] == '-' || p[0] == '+') ++p; + // guess based on following two bits. + if (p[0] == '0') { + if (p[1] == 'b' || p[1] == 'B') { + rd = 2; + } else if (p[1] == 'o' || p[1] == 'O') { + rd = 8; + } else if (p[1] == 'x' || p[1] == 'X') { + rd = 16; + } else if (p[1] == 'd' || p[1] == 'D') { + rd = 10; + } + } + return rd; +} + +// ---------------------------------------------------------------------- + +// Basic integral struct upon which ap_int and ap_fixed are defined. +#ifdef __SYNTHESIS__ +// Use ssdm_int, a compiler dependent, attribute constrained integeral type as +// basic data type. +#define _AP_ROOT_TYPE ssdm_int +// Basic ops. 
+#define _AP_ROOT_op_concat(Ret, X, Y) _ssdm_op_concat(Ret, X, Y) +#define _AP_ROOT_op_get_bit(Val, Bit) _ssdm_op_get_bit(Val, Bit) +#define _AP_ROOT_op_set_bit(Val, Bit, Repl) _ssdm_op_set_bit(Val, Bit, Repl) +#define _AP_ROOT_op_get_range(Val, Lo, Hi) _ssdm_op_get_range(Val, Lo, Hi) +#define _AP_ROOT_op_set_range(Val, Lo, Hi, Repl) \ + _ssdm_op_set_range(Val, Lo, Hi, Repl) +#define _AP_ROOT_op_reduce(Op, Val) _ssdm_op_reduce(Op, Val) +#else // ifdef __SYNTHESIS__ +// Use ap_private for compiler-independent basic data type +template +class ap_private; +/// model ssdm_int in standard C++ for simulation. +template +struct ssdm_int_sim { + /// integral type with template-specified width and signedness. + ap_private<_AP_W, _AP_S> V; + ssdm_int_sim() {} +}; +#define _AP_ROOT_TYPE ssdm_int_sim +// private's ref uses _AP_ROOT_TYPE. +#include +// XXX The C-sim model cannot use GCC-extension +// Basic ops. Ret and Val are ap_private. +template +inline _Tp1 _AP_ROOT_op_concat(const _Tp1& Ret, const _Tp2& X, const _Tp3& Y) { + _Tp1 r = (X).operator,(Y); + return r; +} +#define _AP_ROOT_op_get_bit(Val, Bit) (Val).get_bit((Bit)) +template +inline _Tp1& _AP_ROOT_op_set_bit(_Tp1& Val, const _Tp2& Bit, const _Tp3& Repl) { + (Val).set_bit((Bit), (Repl)); + return Val; +} +// notice the order of high and low index is different in ssdm call and +// ap_private.range()... 
+#define _AP_ROOT_op_get_range(Val, Lo, Hi) (Val).range((Hi), (Lo)) +template +inline _Tp1& _AP_ROOT_op_set_range(_Tp1& Val, const _Tp2& Lo, const _Tp3& Hi, + const _Tp4& Repl) { + (Val).range((Hi), (Lo)) = Repl; + return (Val); +} +#define _AP_ROOT_op_and_reduce(Val) (Val).and_reduce() +#define _AP_ROOT_op_nand_reduce(Val) (Val).nand_reduce() +#define _AP_ROOT_op_or_reduce(Val) (Val).or_reduce() +#define _AP_ROOT_op_xor_reduce(Val) (Val).xor_reduce() +// ## is the concatenation in preprocessor: +#define _AP_ROOT_op_reduce(Op, Val) _AP_ROOT_op_##Op##_reduce(Val) +#endif // ifdef __SYNTHESIS__ else + +// ---------------------------------------------------------------------- + +// Constants for half, single, double pricision floating points +#define HALF_MAN 10 +#define FLOAT_MAN 23 +#define DOUBLE_MAN 52 + +#define HALF_EXP 5 +#define FLOAT_EXP 8 +#define DOUBLE_EXP 11 + +#define BIAS(e) ((1L << (e - 1L)) - 1L) +#define HALF_BIAS BIAS(HALF_EXP) +#define FLOAT_BIAS BIAS(FLOAT_EXP) +#define DOUBLE_BIAS BIAS(DOUBLE_EXP) + +#define APFX_IEEE_DOUBLE_E_MAX DOUBLE_BIAS +#define APFX_IEEE_DOUBLE_E_MIN (-DOUBLE_BIAS + 1) + +INLINE ap_ulong doubleToRawBits(double pf) { + union { + ap_ulong __L; + double __D; + } LD; + LD.__D = pf; + return LD.__L; +} + +INLINE unsigned int floatToRawBits(float pf) { + union { + unsigned int __L; + float __D; + } LD; + LD.__D = pf; + return LD.__L; +} + +#if _AP_ENABLE_HALF_ == 1 +INLINE unsigned short halfToRawBits(half pf) { +#ifdef __SYNTHESIS__ + union { + unsigned short __L; + half __D; + } LD; + LD.__D = pf; + return LD.__L; +#else + return pf.get_bits(); +#endif +} +#endif + +// usigned long long is at least 64-bit +INLINE double rawBitsToDouble(ap_ulong pi) { + union { + ap_ulong __L; + double __D; + } LD; + LD.__L = pi; + return LD.__D; +} + +// long is at least 32-bit +INLINE float rawBitsToFloat(unsigned long pi) { + union { + unsigned int __L; + float __D; + } LD; + LD.__L = pi; + return LD.__D; +} + +#if _AP_ENABLE_HALF_ == 1 +// 
short is at least 16-bit +INLINE half rawBitsToHalf(unsigned short pi) { +#ifdef __SYNTHESIS__ + union { + unsigned short __L; + half __D; + } LD; + LD.__L = pi; + return LD.__D; +#else + // sim model of half has a non-trivial constructor + half __D; + __D.set_bits(pi); + return __D; +#endif +} +#endif + +#endif // ifndef __AP_COMMON_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_decl.h b/TrigScint/include/TrigScint/ap_decl.h new file mode 100644 index 000000000..ddd00f1c7 --- /dev/null +++ b/TrigScint/include/TrigScint/ap_decl.h @@ -0,0 +1,212 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_DECL_H__ +#define __AP_DECL_H__ + +// ---------------------------------------------------------------------- + +#if !defined(__AP_FIXED_H__) && !defined(__AP_INT_H__) && !defined(__AUTOPILOT_CBE_H__) && !defined(__HLS_HALF_H__) +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." +#endif + +// Test __SYNTHESIS__ only for mode +#if !defined(__SYNTHESIS__) && (defined(AESL_SYN) || defined(__HLS_SYN__)) +//#pragma message "AESL_SYN and __HLS_SYN__ should be replaced by __SYNTHESIS__" +#define __SYNTHESIS__ +#endif + +/* for safety*/ +#if (defined(_AP_N) || defined(_AP_C)) +#error One or more of the following is defined: _AP_N, _AP_C. Definition conflicts with their usage as template parameters. 
+#endif + +/* for safety*/ +#if (defined(_AP_W) || defined(_AP_I) || defined(_AP_S) || defined(_AP_Q) || \ + defined(_AP_O) || defined(_AP_W2) || defined(_AP_I2) || \ + defined(_AP_S2) || defined(_AP_Q2) || defined(_AP_O2) || \ + defined(_AP_N) || defined(_AP_N2)) +#error \ + "One or more of the following is defined: _AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N, _AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2. Definition conflicts with their usage as template parameters." +#endif + +/*for safety*/ +#if (defined(_AP_W3) || defined(_AP_S3) || defined(_AP_W4) || defined(_AP_S4)) +#error \ + "One or more of the following is defined: _AP_W3, _AP_S3, _AP_W4,_AP_S4. Definition conflicts with their usage as template parameters." +#endif + +#if (defined(_AP_W1) || defined(_AP_S1) || defined(_AP_T) || \ + defined(_AP_T1) || defined(_AP_T2) || defined(_AP_T3) || defined(_AP_T4)) +#error \ + "One or more of the following is defined: _AP_W1, _AP_S1, _AP_T, _AP_T1, _AP_T2, _AP_T3, _AP_T4. Definition conflicts with their usage as template parameters." 
+#endif + +#ifndef __cplusplus +#error "AP data type can only be used in C++" +#endif + +// ---------------------------------------------------------------------- + +#ifndef __SC_COMPATIBLE__ +/// ap_fixed quantification mode +enum ap_q_mode { + AP_RND, //< rounding to plus infinity + AP_RND_ZERO, //< rounding to zero + AP_RND_MIN_INF, //< rounding to minus infinity + AP_RND_INF, //< rounding to infinity + AP_RND_CONV, //< convergent rounding + AP_TRN, //< truncation + AP_TRN_ZERO, //< truncation to zero +}; + +// FIXME for legacy code +#ifndef SYSTEMC_INCLUDED +#define SC_RND AP_RND +#define SC_RND_ZERO AP_RND_ZERO +#define SC_RND_MIN_INF AP_RND_MIN_INF +#define SC_RND_INF AP_RND_INF +#define SC_RND_CONV AP_RND_CONV +#define SC_TRN AP_TRN +#define SC_TRN_ZERO AP_TRN_ZERO +#endif // !defined(SYSTEMC_INCLUDED) + +/// ap_fixed saturation mode +enum ap_o_mode { + AP_SAT, //< saturation + AP_SAT_ZERO, //< saturation to zero + AP_SAT_SYM, //< symmetrical saturation + AP_WRAP, //< wrap-around (*) + AP_WRAP_SM, //< sign magnitude wrap-around (*) +}; + +// FIXME for legacy code +#ifndef SYSTEMC_INCLUDED +#define SC_SAT AP_SAT +#define SC_SAT_ZERO AP_SAT_ZERO +#define SC_SAT_SYM AP_SAT_SYM +#define SC_WRAP AP_WRAP +#define SC_WRAP_SM AP_WRAP_SM +#endif // !defined(SYSTEMC_INCLUDED) + +#else // defined(__SC_COMPATIBLE__) + +// There will not be sc_fxdefs.h, and the emu should be defined by ap_fixed. 
+ +/// ap_fixed quantification mode +enum ap_q_mode { + SC_RND, //< rounding to plus infinity + SC_RND_ZERO, //< rounding to zero + SC_RND_MIN_INF, //< rounding to minus infinity + SC_RND_INF, //< rounding to infinity + SC_RND_CONV, //< convergent rounding + SC_TRN, //< truncation + SC_TRN_ZERO, //< truncation to zero +}; + +#define AP_RND SC_RND +#define AP_RND_ZERO SC_RND_ZERO +#define AP_RND_MIN_INF SC_RND_MIN_INF +#define AP_RND_INF SC_RND_INF +#define AP_RND_CONV SC_RND_CONV +#define AP_TRN SC_TRN +#define AP_TRN_ZERO SC_TRN_ZERO + +/// ap_fixed saturation mode +enum ap_o_mode { + SC_SAT, //< saturation + SC_SAT_ZERO, //< saturation to zero + SC_SAT_SYM, //< symmetrical saturation + SC_WRAP, //< wrap-around (*) + SC_WRAP_SM, //< sign magnitude wrap-around (*) +}; + +#define AP_SAT SC_SAT +#define AP_SAT_ZERO SC_SAT_ZERO +#define AP_SAT_SYM SC_SAT_SYM +#define AP_WRAP SC_WRAP +#define AP_WRAP_SM SC_WRAP_SM + +#endif // defined(__SC_COMPATIBLE__) + +template +struct ap_int_base; + +template +struct ap_int; + +template +struct ap_uint; + +template +struct ap_range_ref; + +template +struct ap_bit_ref; + +template +struct ap_concat_ref; + +template +struct ap_fixed_base; + +template +struct ap_fixed; + +template +struct ap_ufixed; + +template +struct af_range_ref; + +template +struct af_bit_ref; + +/// string base mode +enum BaseMode { AP_BIN = 2, AP_OCT = 8, AP_DEC = 10, AP_HEX = 16 }; + +#ifndef SYSTEMC_INCLUDED +#define SC_BIN 2 +#define SC_OCT 8 +#define SC_DEC 10 +#define SC_HEX 16 +#endif // !defined(SYSTEMC_INCLUDED) + +// Alias C data types +#ifdef _MSC_VER +typedef signed __int64 ap_slong; +typedef unsigned __int64 ap_ulong; +#else // !defined(_MSC_VER) +typedef signed long long ap_slong; +typedef unsigned long long ap_ulong; +#endif // !defined(_MSC_VER) + +enum { + _AP_SIZE_char = 8, + _AP_SIZE_short = sizeof(short) * 8, + _AP_SIZE_int = sizeof(int) * 8, + _AP_SIZE_long = sizeof(long) * 8, + _AP_SIZE_ap_slong = sizeof(ap_slong) * 8 +}; + +#endif // 
!defined(__AP_DECL_H__) + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_fixed.h b/TrigScint/include/TrigScint/ap_fixed.h new file mode 100644 index 000000000..6362f3d71 --- /dev/null +++ b/TrigScint/include/TrigScint/ap_fixed.h @@ -0,0 +1,360 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_FIXED_H__ +#define __AP_FIXED_H__ + +#include +#include +#include + +//--------------------------------------------------------------- + +/// Signed Arbitrary Precision Fixed-Point Type. +// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h +template +struct ap_fixed : ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> { + typedef ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> Base; + // Constructor + /// default ctor + INLINE ap_fixed() : Base() {} + + /// default copy ctor + INLINE ap_fixed(const ap_fixed& op) { Base::V = op.V; } + + /// copy ctor from ap_fixed_base. 
+ template + INLINE ap_fixed(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, + _AP_O2, _AP_N2>& op) + : Base(op) {} + + template + INLINE ap_fixed(const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, + _AP_O2, _AP_N2>& op) + : Base(op) {} + + //// from ap_fixed + //template + //INLINE ap_fixed( + // const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} + + //template + //INLINE ap_fixed( + // const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} + + //// from ap_ufixed. + //template + //INLINE ap_fixed( + // const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { + //} + + //template + //INLINE ap_fixed( + // const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { + //} + + /// copy ctor from ap_int_base. + template + INLINE ap_fixed(const ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} + + template + INLINE ap_fixed(const volatile ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} + + //// from ap_int. + //template + //INLINE ap_fixed(const ap_int<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, true>(op)) {} + + //template + //INLINE ap_fixed(const volatile ap_int<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, true>(op)) {} + + //// from ap_uint. + //template + //INLINE ap_fixed(const ap_uint<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, false>(op)) {} + + //template + //INLINE ap_fixed(const volatile ap_uint<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, false>(op)) {} + + // from ap_bit_ref. + template + INLINE ap_fixed(const ap_bit_ref<_AP_W2, _AP_S2>& op) : Base(op) {} + + // from ap_range_ref. + template + INLINE ap_fixed(const ap_range_ref<_AP_W2, _AP_S2>& op) : Base(op) {} + + // from ap_concat_ref. 
+ template + INLINE ap_fixed(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) + : Base(op) {} + + // from af_bit_ref. + template + INLINE ap_fixed( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + + // from af_range_ref. + template + INLINE ap_fixed( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + +// from c types. +#define CTOR(TYPE) \ + INLINE ap_fixed(TYPE v) : Base(v) {} + + CTOR(bool) + CTOR(char) + CTOR(signed char) + CTOR(unsigned char) + CTOR(short) + CTOR(unsigned short) + CTOR(int) + CTOR(unsigned int) + CTOR(long) + CTOR(unsigned long) + CTOR(ap_slong) + CTOR(ap_ulong) +#if _AP_ENABLE_HALF_ == 1 + CTOR(half) +#endif + CTOR(float) + CTOR(double) +#undef CTOR + + INLINE ap_fixed(const char* s) : Base(s) {} + + INLINE ap_fixed(const char* s, signed char rd) : Base(s, rd) {} + + // Assignment + // The assignment operator is technically inherited; however, it is always + // hidden by an explicitly or implicitly defined assignment operator for the + // derived class. + /* XXX ctor will be used when right is not of proper type. */ + INLINE ap_fixed& operator=( + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { + Base::V = op.V; + return *this; + } + + INLINE void operator=( + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { + Base::V = op.V; + } + + INLINE ap_fixed& operator=( + const volatile ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { + Base::V = op.V; + return *this; + } + + INLINE void operator=( + const volatile ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { + Base::V = op.V; + } +}; // struct ap_fixed. + +//------------------------------------------------------------------- + +// Unsigned Arbitrary Precision Fixed-Point Type. 
+// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h +template +struct ap_ufixed : ap_fixed_base<_AP_W, _AP_I, false, _AP_Q, _AP_O, _AP_N> { + typedef ap_fixed_base<_AP_W, _AP_I, false, _AP_Q, _AP_O, _AP_N> Base; + // Constructor + /// default ctor + INLINE ap_ufixed() : Base() {} + + /// default copy ctor + INLINE ap_ufixed(const ap_ufixed& op) { Base::V = op.V; } + + /// copy ctor from ap_fixed_base + template + INLINE ap_ufixed(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, + _AP_O2, _AP_N2>& op) + : Base(op) {} + + /// copy ctor from ap_fixed_base + template + INLINE ap_ufixed(const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, + _AP_O2, _AP_N2>& op) + : Base(op) {} + + //template + //INLINE ap_ufixed( + // const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} + + //template + //INLINE ap_ufixed( + // const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} + + //template + //INLINE ap_ufixed( + // const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { + //} + + //template + //INLINE ap_ufixed( + // const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { + //} + + /// copy ctor from ap_int_base. 
+ template + INLINE ap_ufixed(const ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} + + template + INLINE ap_ufixed(const volatile ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} + + //template + //INLINE ap_ufixed(const ap_int<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, true>(op)) {} + + //template + //INLINE ap_ufixed(const volatile ap_int<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, true>(op)) {} + + //template + //INLINE ap_ufixed(const ap_uint<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, false>(op)) {} + + //template + //INLINE ap_ufixed(const volatile ap_uint<_AP_W2>& op) + // : Base(ap_int_base<_AP_W2, false>(op)) {} + + template + INLINE ap_ufixed(const ap_bit_ref<_AP_W2, _AP_S2>& op) : Base(op) {} + + template + INLINE ap_ufixed(const ap_range_ref<_AP_W2, _AP_S2>& op) : Base(op) {} + + template + INLINE ap_ufixed(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) + : Base(op) {} + + template + INLINE ap_ufixed( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + + template + INLINE ap_ufixed( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + +#define CTOR(TYPE) \ + INLINE ap_ufixed(TYPE v) : Base(v) {} + + CTOR(bool) + CTOR(char) + CTOR(signed char) + CTOR(unsigned char) + CTOR(short) + CTOR(unsigned short) + CTOR(int) + CTOR(unsigned int) + CTOR(long) + CTOR(unsigned long) + CTOR(ap_slong) + CTOR(ap_ulong) +#if _AP_ENABLE_HALF_ == 1 + CTOR(half) +#endif + CTOR(float) + CTOR(double) +#undef CTOR + + INLINE ap_ufixed(const char* s) : Base(s) {} + + INLINE ap_ufixed(const char* s, signed char rd) : Base(s, rd) {} + + // Assignment + INLINE ap_ufixed& operator=( + const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { + Base::V = op.V; + return *this; + } + + INLINE void operator=( + const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { + Base::V = op.V; + } + + INLINE ap_ufixed& operator=( + const volatile ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { + 
Base::V = op.V; + return *this; + } + + INLINE void operator=(const volatile ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, + _AP_N>& op) volatile { + Base::V = op.V; + } +}; // struct ap_ufixed + + +#if !defined(__SYNTHESIS__) && (defined(SYSTEMC_H) || defined(SYSTEMC_INCLUDED)) +// XXX sc_trace overload for ap_fixed is already included in +// "ap_sysc/ap_sc_extras.h", so do not define in synthesis. +template +INLINE void sc_trace(sc_core::sc_trace_file* tf, + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op, + const std::string& name) { + tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); +} + +template +INLINE void sc_trace(sc_core::sc_trace_file* tf, + const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op, + const std::string& name) { + tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); +} +#endif // System C sim + +// Specialization of std containers, so that std::complex can have its +// image part automatically zero-initialized when only real part is provided. +#include + +#endif // ifndef __AP_FIXED_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_fixed_base.h b/TrigScint/include/TrigScint/ap_fixed_base.h new file mode 100644 index 000000000..eb2bdbf5d --- /dev/null +++ b/TrigScint/include/TrigScint/ap_fixed_base.h @@ -0,0 +1,2354 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __AP_FIXED_BASE_H__ +#define __AP_FIXED_BASE_H__ + +#ifndef __AP_FIXED_H__ +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." +#endif + +// for ap_int_base and its reference types. +#include +#ifndef __SYNTHESIS__ +#if _AP_ENABLE_HALF_ == 1 +// for half type +#include +#endif +// for std io +#include +#endif + +#ifndef __cplusplus +#error "C++ is required to include this header file" +#else // __cplusplus + +// for warning on unsupported rounding mode in conversion to float/double. +#if !defined(__SYNTHESIS__) && __cplusplus >= 201103L && \ + (defined(__gnu_linux__) || defined(_WIN32)) +#define AP_FIXED_ENABLE_CPP_FENV 1 +#include +#endif + +// ---------------------------------------------------------------------- + +/* Major TODO + long double support: constructor, assign and other operators. + binary operators with ap_fixed_base and const char*. + return ap_fixed/ap_ufixed when result signedness is known. +*/ + +// Helper function in conversion to floating point types. + +#ifdef __SYNTHESIS__ +#define _AP_ctype_op_get_bit(var, index) _AP_ROOT_op_get_bit(var, index) +#define _AP_ctype_op_set_bit(var, index, x) _AP_ROOT_op_set_bit(var, index, x) +#define _AP_ctype_op_get_range(var, low, high) \ + _AP_ROOT_op_get_range(var, low, high) +#define _AP_ctype_op_set_range(var, low, high, x) \ + _AP_ROOT_op_set_range(var, low, high, x) +#else // ifdef __SYNTHESIS__ +template +inline bool _AP_ctype_op_get_bit(_Tp1& var, const _Tp2& index) { + return !!(var & (1ull << (index))); +} +template +inline _Tp1 _AP_ctype_op_set_bit(_Tp1& var, const _Tp2& index, const _Tp3& x) { + var |= (((x) ? 
1ull : 0ull) << (index)); + return var; +} +template +inline _Tp1 _AP_ctype_op_get_range(_Tp1& var, const _Tp2& low, + const _Tp3& high) { + _Tp1 r = var; + ap_ulong mask = -1ll; + mask >>= (sizeof(_Tp1) * 8 - ((high) - (low) + 1)); + r >>= (low); + r &= mask; + return r; +} +template +inline _Tp1 _AP_ctype_op_set_range(_Tp1& var, const _Tp2& low, const _Tp3& high, + const _Tp4& x) { + ap_ulong mask = -1ll; + mask >>= (_AP_SIZE_ap_slong - ((high) - (low) + 1)); + var &= ~(mask << (low)); + var |= ((mask & x) << (low)); + return var; +} +#endif // ifdef __SYNTHESIS__ + + +// trait for letting base class to return derived class. +// Notice that derived class template is incomplete, and we cannot use +// the member of the derived class. +template +struct _ap_fixed_factory; +template +struct _ap_fixed_factory<_AP_W2, _AP_I2, true> { + typedef ap_fixed<_AP_W2, _AP_I2> type; +}; +template +struct _ap_fixed_factory<_AP_W2, _AP_I2, false> { + typedef ap_ufixed<_AP_W2, _AP_I2> type; +}; + +/// ap_fixed_base: AutoPilot fixed point. +/** partial specialization of signed. + @tparam _AP_W width. + @tparam _AP_I integral part width. + @tparam _AP_S signed. + @tparam _AP_Q quantization mode. Default is AP_TRN. + @tparam _AP_O saturation mode. Default is AP_WRAP. + @tparam _AP_N saturation wrap value. Default is 0. + */ +// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h +template +struct ap_fixed_base : _AP_ROOT_TYPE<_AP_W, _AP_S> { + public: + typedef _AP_ROOT_TYPE<_AP_W, _AP_S> Base; + static const int width = _AP_W; + static const int iwidth = _AP_I; + static const ap_q_mode qmode = _AP_Q; + static const ap_o_mode omode = _AP_O; + + /// Return type trait. 
+ template + struct RType { + enum { + _AP_F = _AP_W - _AP_I, + F2 = _AP_W2 - _AP_I2, + mult_w = _AP_W + _AP_W2, + mult_i = _AP_I + _AP_I2, + mult_s = _AP_S || _AP_S2, + plus_w = AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + + 1 + AP_MAX(_AP_F, F2), + plus_i = + AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1, + plus_s = _AP_S || _AP_S2, + minus_w = + AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1 + + AP_MAX(_AP_F, F2), + minus_i = + AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1, + minus_s = true, +#ifndef __SC_COMPATIBLE__ + div_w = _AP_S2 + _AP_W + AP_MAX(F2, 0), +#else + div_w = _AP_S2 + _AP_W + AP_MAX(F2, 0) + AP_MAX(_AP_I2, 0), +#endif + div_i = _AP_S2 + _AP_I + F2, + div_s = _AP_S || _AP_S2, + logic_w = + AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + + AP_MAX(_AP_F, F2), + logic_i = AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)), + logic_s = _AP_S || _AP_S2 + }; + + typedef ap_fixed_base<_AP_W, _AP_I, _AP_S> lhs; + typedef ap_fixed_base<_AP_W2, _AP_I2, _AP_S2> rhs; + + typedef ap_fixed_base mult_base; + typedef ap_fixed_base plus_base; + typedef ap_fixed_base minus_base; + typedef ap_fixed_base logic_base; + typedef ap_fixed_base div_base; + typedef ap_fixed_base<_AP_W, _AP_I, _AP_S> arg1_base; + + typedef typename _ap_fixed_factory::type mult; + typedef typename _ap_fixed_factory::type plus; + typedef typename _ap_fixed_factory::type minus; + typedef typename _ap_fixed_factory::type logic; + typedef typename _ap_fixed_factory::type div; + typedef typename _ap_fixed_factory<_AP_W, _AP_I, _AP_S>::type arg1; + }; + + private: +#ifndef __SYNTHESIS__ + // This cannot handle hex float format string. 
+ void fromString(const std::string& val, unsigned char radix) { + _AP_ERROR(!(radix == 2 || radix == 8 || radix == 10 || radix == 16), + "ap_fixed_base::fromString(%s, %d)", val.c_str(), radix); + + Base::V = 0; + int startPos = 0; + int endPos = val.length(); + int decPos = val.find("."); + if (decPos == -1) decPos = endPos; + + // handle sign + bool isNegative = false; + if (val[0] == '-') { + isNegative = true; + ++startPos; + } else if (val[0] == '+') + ++startPos; + + // If there are no integer bits, e.g.: + // .0000XXXX, then keep at least one bit. + // If the width is greater than the number of integer bits, e.g.: + // XXXX.XXXX, then we keep the integer bits + // if the number of integer bits is greater than the width, e.g.: + // XXX000 then we keep the integer bits. + // Always keep one bit. + ap_fixed_base + integer_bits = 0; + + // Figure out if we can shift instead of multiply + unsigned shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0); + + //std::cout << "\n\n" << val << "\n"; + //std::cout << startPos << " " << decPos << " " << endPos << "\n"; + + bool sticky_int = false; + + // Traverse the integer digits from the MSD, multiplying by radix as we go. 
+ for (int i = startPos; i < decPos; i++) { + // Get a digit + char cdigit = val[i]; + if (cdigit == '\0') continue; + unsigned digit = ap_private_ops::decode_digit(cdigit, radix); + + sticky_int |= integer_bits[AP_MAX(_AP_I, 4) + 4 - 1] | + integer_bits[AP_MAX(_AP_I, 4) + 4 - 2] | + integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] | + integer_bits[AP_MAX(_AP_I, 4) + 4 - 4]; + // Shift or multiply the value by the radix + if (shift) + integer_bits <<= shift; + else + integer_bits *= radix; + + // Add in the digit we just interpreted + integer_bits += digit; + //std::cout << "idigit = " << digit << " " << integer_bits.to_string() + // << " " << sticky_int << "\n"; + } + integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] = + integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] | sticky_int; + + ap_fixed_base fractional_bits = 0; + bool sticky = false; + + // Traverse the fractional digits from the LSD, dividing by radix as we go. + for (int i = endPos - 1; i >= decPos + 1; i--) { + // Get a digit + char cdigit = val[i]; + if (cdigit == '\0') continue; + unsigned digit = ap_private_ops::decode_digit(cdigit, radix); + // Add in the digit we just interpreted + fractional_bits += digit; + + sticky |= fractional_bits[0] | fractional_bits[1] | fractional_bits[2] | + fractional_bits[3]; + // Shift or divide the value by the radix + if (shift) + fractional_bits >>= shift; + else + fractional_bits /= radix; + + //std::cout << "fdigit = " << digit << " " << fractional_bits.to_string() + // << " " << sticky << "\n"; + } + + //std::cout << "Int =" << integer_bits.to_string() << " " << + // fractional_bits.to_string() << "\n"; + + fractional_bits[0] = fractional_bits[0] | sticky; + + if (isNegative) + *this = -(integer_bits + fractional_bits); + else + *this = integer_bits + fractional_bits; + + //std::cout << "end = " << this->to_string(16) << "\n"; + } + + /// report invalid constrction of ap_fixed_base + INLINE void report() { + if (!_AP_S && _AP_O == AP_WRAP_SM) { + fprintf(stderr, "ap_ufxied<...> cannot 
support AP_WRAP_SM.\n"); + exit(1); + } + if (_AP_W > MAX_MODE(AP_INT_MAX_W) * 1024) { + fprintf(stderr, + "[E] ap_%sfixed<%d, ...>: Bitwidth exceeds the " + "default max value %d. Please use macro " + "AP_INT_MAX_W to set a larger max value.\n", + _AP_S ? "" : "u", _AP_W, MAX_MODE(AP_INT_MAX_W) * 1024); + exit(1); + } + } +#else + INLINE void report() {} +#endif // ifdef __SYNTHESIS__ + + /// @name helper functions. + // @{ + INLINE void overflow_adjust(bool underflow, bool overflow, bool lD, + bool sign) { + if (!underflow && !overflow) return; + if (_AP_O == AP_WRAP) { + if (_AP_N == 0) return; + if (_AP_S) { + // signed AP_WRAP + // n_bits == 1 + Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, sign); + if (_AP_N > 1) { + // n_bits > 1 + ap_int_base<_AP_W, false> mask(-1); + if (sign) mask.V = 0; + Base::V = + _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 2, mask.V); + } + } else { + // unsigned AP_WRAP + ap_int_base<_AP_W, false> mask(-1); + Base::V = + _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 1, mask.V); + } + } else if (_AP_O == AP_SAT_ZERO) { + Base::V = 0; + } else if (_AP_O == AP_WRAP_SM && _AP_S) { + bool Ro = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); + if (_AP_N == 0) { + if (lD != Ro) { + Base::V = ~Base::V; + Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, lD); + } + } else { + if (_AP_N == 1 && sign != Ro) { + Base::V = ~Base::V; + } else if (_AP_N > 1) { + bool lNo = _AP_ROOT_op_get_bit(Base::V, _AP_W - _AP_N); + if (lNo == sign) Base::V = ~Base::V; + ap_int_base<_AP_W, false> mask(-1); + if (sign) mask.V = 0; + Base::V = + _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 2, mask.V); + } + Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, sign); + } + } else { + if (_AP_S) { + if (overflow) { + Base::V = 1; + Base::V <<= _AP_W - 1; + Base::V = ~Base::V; + } else if (underflow) { + Base::V = 1; + Base::V <<= _AP_W - 1; + if (_AP_O == AP_SAT_SYM) Base::V |= 1; + } + } else { + if (overflow) + Base::V = ~(ap_int_base<_AP_W, 
false>(0).V); + else if (underflow) + Base::V = 0; + } + } + } + + INLINE bool quantization_adjust(bool qb, bool r, bool s) { + bool carry = (bool)_AP_ROOT_op_get_bit(Base::V, _AP_W - 1); + if (_AP_Q == AP_TRN) return false; + if (_AP_Q == AP_RND_ZERO) + qb &= s || r; + else if (_AP_Q == AP_RND_MIN_INF) + qb &= r; + else if (_AP_Q == AP_RND_INF) + qb &= !s || r; + else if (_AP_Q == AP_RND_CONV) + qb &= _AP_ROOT_op_get_bit(Base::V, 0) || r; + else if (_AP_Q == AP_TRN_ZERO) + qb = s && (qb || r); + Base::V += qb; + return carry && (!(bool)_AP_ROOT_op_get_bit(Base::V, _AP_W - 1)); + } + // @} + + public: + /// @name constructors. + // @{ + /// default ctor. + INLINE ap_fixed_base() {} + + /// copy ctor. + template + INLINE ap_fixed_base( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + operator=(op); + report(); + } + + template + INLINE ap_fixed_base( + const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + operator=(op); + report(); + } + + template + INLINE ap_fixed_base(const ap_int_base<_AP_W2, _AP_S2>& op) { + ap_fixed_base<_AP_W2, _AP_W2, _AP_S2> tmp; + tmp.V = op.V; + operator=(tmp); + report(); + } + + template + INLINE ap_fixed_base(const volatile ap_int_base<_AP_W2, _AP_S2>& op) { + ap_fixed_base<_AP_W2, _AP_W2, _AP_S2> tmp; + tmp.V = op.V; + operator=(tmp); + report(); + } + +#ifndef __SYNTHESIS__ +#ifndef NON_C99STRING + INLINE ap_fixed_base(const char* s, signed char rd = 0) { + unsigned char radix = rd; + std::string str = ap_private_ops::parseString(s, radix); // will guess rd, default 10 + _AP_ERROR(radix == 0, "ap_fixed_base(const char* \"%s\", %d), str=%s, radix = %d", + s, rd, str.c_str(), radix); // TODO remove this check + fromString(str, radix); + } +#else + INLINE ap_fixed_base(const char* s, signed char rd = 10) { + ap_int_base<_AP_W, _AP_S> t(s, rd); + Base::V = t.V; + } +#endif // ifndef NON_C99STRING +#else // ifndef __SYNTHESIS__ + // XXX _ssdm_string2bits only takes const string 
and const radix. + // It seems XFORM will do compile time processing of the string. + INLINE ap_fixed_base(const char* s) { + typeof(Base::V) t; + _ssdm_string2bits((void*)(&t), (const char*)(s), 10, _AP_I, _AP_S, _AP_Q, + _AP_O, _AP_N, _AP_C99); + Base::V = t; + } + INLINE ap_fixed_base(const char* s, signed char rd) { + typeof(Base::V) t; + _ssdm_string2bits((void*)(&t), (const char*)(s), rd, _AP_I, _AP_S, _AP_Q, + _AP_O, _AP_N, _AP_C99); + Base::V = t; + } +#endif // ifndef __SYNTHESIS__ else + + template + INLINE ap_fixed_base(const ap_bit_ref<_AP_W2, _AP_S2>& op) { + *this = ((bool)op); + report(); + } + + template + INLINE ap_fixed_base(const ap_range_ref<_AP_W2, _AP_S2>& op) { + *this = (ap_int_base<_AP_W2, false>(op)); + report(); + } + + template + INLINE ap_fixed_base( + const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) { + *this = (ap_int_base<_AP_W2 + _AP_W3, false>(op)); + report(); + } + + template + INLINE ap_fixed_base( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + *this = (bool(op)); + report(); + } + + template + INLINE ap_fixed_base( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + *this = (ap_int_base<_AP_W2, false>(op)); + report(); + } + + // ctors from c types. 
+ // make a temp ap_fixed_base first, and use ap_fixed_base.operator= +#define CTOR_FROM_INT(C_TYPE, _AP_W2, _AP_S2) \ + INLINE ap_fixed_base(const C_TYPE x) { \ + ap_fixed_base<(_AP_W2), (_AP_W2), (_AP_S2)> tmp; \ + tmp.V = x; \ + *this = tmp; \ + } + + CTOR_FROM_INT(bool, 1, false) + CTOR_FROM_INT(char, 8, CHAR_IS_SIGNED) + CTOR_FROM_INT(signed char, 8, true) + CTOR_FROM_INT(unsigned char, 8, false) + CTOR_FROM_INT(short, _AP_SIZE_short, true) + CTOR_FROM_INT(unsigned short, _AP_SIZE_short, false) + CTOR_FROM_INT(int, _AP_SIZE_int, true) + CTOR_FROM_INT(unsigned int, _AP_SIZE_int, false) + CTOR_FROM_INT(long, _AP_SIZE_long, true) + CTOR_FROM_INT(unsigned long, _AP_SIZE_long, false) + CTOR_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) + CTOR_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) +#undef CTOR_FROM_INT +/* + * TODO: + *Theere used to be several funtions which were AP_WEAK. + *Now they're all INLINE expect ap_fixed_base(double d) + *Maybe we can use '#pragma HLS inline' instead of INLINE. + */ + AP_WEAK ap_fixed_base(double d) { + ap_int_base<64, false> ireg; + ireg.V = doubleToRawBits(d); + bool isneg = _AP_ROOT_op_get_bit(ireg.V, 63); + + ap_int_base exp; + ap_int_base exp_tmp; + exp_tmp.V = + _AP_ROOT_op_get_range(ireg.V, DOUBLE_MAN, DOUBLE_MAN + DOUBLE_EXP - 1); + exp = exp_tmp - DOUBLE_BIAS; + ap_int_base man; + man.V = _AP_ROOT_op_get_range(ireg.V, 0, DOUBLE_MAN - 1); + // do not support NaN + _AP_WARNING(exp == APFX_IEEE_DOUBLE_E_MAX + 1 && man.V != 0, + "assign NaN to fixed point value"); + man.V = _AP_ROOT_op_set_bit(man.V, DOUBLE_MAN, 1); + if (isneg) man = -man; + if ((ireg.V & 0x7fffffffffffffffLL) == 0) { + Base::V = 0; + } else { + int _AP_W2 = DOUBLE_MAN + 2, _AP_I2 = exp.V + 2, _AP_F = _AP_W - _AP_I, + F2 = _AP_W2 - _AP_I2; + bool _AP_S2 = true, + QUAN_INC = F2 > _AP_F && + !(_AP_Q == AP_TRN || (_AP_Q == AP_TRN_ZERO && !_AP_S2)); + bool carry = false; + // handle quantization + unsigned sh_amt = (F2 > _AP_F) ? 
F2 - _AP_F : _AP_F - F2; + if (F2 == _AP_F) + Base::V = man.V; + else if (F2 > _AP_F) { + if (sh_amt < DOUBLE_MAN + 2) + Base::V = man.V >> sh_amt; + else { + Base::V = isneg ? -1 : 0; + } + if ((_AP_Q != AP_TRN) && !((_AP_Q == AP_TRN_ZERO) && !_AP_S2)) { + bool qb = (F2 - _AP_F > _AP_W2) ? isneg : (bool)_AP_ROOT_op_get_bit( + man.V, F2 - _AP_F - 1); + bool r = + (F2 > _AP_F + 1) + ? _AP_ROOT_op_get_range(man.V, 0, (F2 - _AP_F - 2 < _AP_W2) + ? (F2 - _AP_F - 2) + : (_AP_W2 - 1)) != 0 + : false; + carry = quantization_adjust(qb, r, isneg); + } + } else { // no quantization + Base::V = man.V; + if (sh_amt < _AP_W) + Base::V = Base::V << sh_amt; + else + Base::V = 0; + } + // handle overflow/underflow + if ((_AP_O != AP_WRAP || _AP_N != 0) && + ((!_AP_S && _AP_S2) || + _AP_I - _AP_S < + _AP_I2 - _AP_S2 + + (QUAN_INC || + (_AP_S2 && (_AP_O == AP_SAT_SYM))))) { // saturation + bool deleted_zeros = _AP_S2 ? true : !carry, deleted_ones = true; + bool neg_src = isneg; + bool lD = false; + int pos1 = F2 - _AP_F + _AP_W; + int pos2 = F2 - _AP_F + _AP_W + 1; + bool newsignbit = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); + if (pos1 < _AP_W2 && pos1 >= 0) + // lD = _AP_ROOT_op_get_bit(man.V, pos1); + lD = (man.V >> pos1) & 1; + if (pos1 < _AP_W2) { + bool Range1_all_ones = true; + bool Range1_all_zeros = true; + bool Range2_all_ones = true; + ap_int_base Range2; + ap_int_base all_ones(-1); + + if (pos2 >= 0 && pos2 < _AP_W2) { + // Range2.V = _AP_ROOT_op_get_range(man.V, + // pos2, _AP_W2 - 1); + Range2.V = man.V; + Range2.V >>= pos2; + Range2_all_ones = Range2 == (all_ones >> pos2); + } else if (pos2 < 0) + Range2_all_ones = false; + if (pos1 >= 0 && pos2 < _AP_W2) { + Range1_all_ones = Range2_all_ones && lD; + Range1_all_zeros = !Range2.V && !lD; + } else if (pos2 == _AP_W2) { + Range1_all_ones = lD; + Range1_all_zeros = !lD; + } else if (pos1 < 0) { + Range1_all_zeros = !man.V; + Range1_all_ones = false; + } + + deleted_zeros = + deleted_zeros && (carry ? 
Range1_all_ones : Range1_all_zeros); + deleted_ones = + carry ? Range2_all_ones && (pos1 < 0 || !lD) : Range1_all_ones; + neg_src = isneg && !(carry && Range1_all_ones); + } else + neg_src = isneg && newsignbit; + bool neg_trg = _AP_S && newsignbit; + bool overflow = (neg_trg || !deleted_zeros) && !isneg; + bool underflow = (!neg_trg || !deleted_ones) && neg_src; + if ((_AP_O == AP_SAT_SYM) && _AP_S2 && _AP_S) + underflow |= + neg_src && + (_AP_W > 1 ? _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 2) == 0 + : true); + overflow_adjust(underflow, overflow, lD, neg_src); + } + } + report(); + } + + // TODO more optimized implementation. + INLINE ap_fixed_base(float d) { *this = ap_fixed_base(double(d)); } + +#if _AP_ENABLE_HALF_ == 1 + // TODO more optimized implementation. + INLINE ap_fixed_base(half d) { *this = ap_fixed_base(double(d)); } +#endif + // @} + + /// @name assign operator + /// assign, using another ap_fixed_base of same template parameters. + /* + INLINE ap_fixed_base& operator=( + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { + Base::V = op.V; + return *this; + } + */ + + template + INLINE ap_fixed_base& operator=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + + const int _AP_F = _AP_W - _AP_I; + const int F2 = _AP_W2 - _AP_I2; + const int QUAN_INC = + F2 > _AP_F && !(_AP_Q == AP_TRN || (_AP_Q == AP_TRN_ZERO && !_AP_S2)); + + if (!op) Base::V = 0; + bool carry = false; + bool signbit = _AP_ROOT_op_get_bit(op.V, _AP_W2 - 1); + bool isneg = signbit && _AP_S2; + if (F2 == _AP_F) + Base::V = op.V; + else if (F2 > _AP_F) { + unsigned int sh_amt = F2 - _AP_F; + // moves bits right, handle quantization. + if (sh_amt < _AP_W2) { + Base::V = op.V >> sh_amt; + } else { + Base::V = isneg ? -1 : 0; + } + if (_AP_Q != AP_TRN && !(_AP_Q == AP_TRN_ZERO && !_AP_S2)) { + bool qbit = _AP_ROOT_op_get_bit(op.V, F2 - _AP_F - 1); + // bit after LSB. + bool qb = (F2 - _AP_F > _AP_W2) ? 
_AP_S2 && signbit : qbit; + enum { hi = ((F2 - _AP_F - 2) < _AP_W2) ? (F2 - _AP_F - 2) : (_AP_W2 - 1) }; + // bits after qb. + bool r = (F2 > _AP_F + 1) ? (_AP_ROOT_op_get_range(op.V, 0, hi) != 0) : false; + carry = quantization_adjust(qb, r, isneg); + } + } else { + unsigned sh_amt = _AP_F - F2; + // moves bits left, no quantization + if (sh_amt < _AP_W) { + if (_AP_W > _AP_W2) { + // extend and then shift, avoid losing bits. + Base::V = op.V; + Base::V <<= sh_amt; + } else { + // shift and truncate. + Base::V = op.V << sh_amt; + } + } else { + Base::V = 0; + } + } + // handle overflow/underflow + if ((_AP_O != AP_WRAP || _AP_N != 0) && + ((!_AP_S && _AP_S2) || + _AP_I - _AP_S < + _AP_I2 - _AP_S2 + + (QUAN_INC || (_AP_S2 && _AP_O == AP_SAT_SYM)))) { // saturation + bool deleted_zeros = _AP_S2 ? true : !carry; + bool deleted_ones = true; + bool neg_src = isneg; + bool newsignbit = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); + enum { pos1 = F2 - _AP_F + _AP_W, pos2 = F2 - _AP_F + _AP_W + 1 }; + bool lD = (pos1 < _AP_W2 && pos1 >= 0) ? _AP_ROOT_op_get_bit(op.V, pos1) + : false; + if (pos1 < _AP_W2) { + bool Range1_all_ones = true; + bool Range1_all_zeros = true; + bool Range2_all_ones = true; + ap_int_base<_AP_W2, false> all_ones(-1); + + if (pos2 < _AP_W2 && pos2 >= 0) { + ap_int_base<_AP_W2, false> Range2; + Range2.V = _AP_ROOT_op_get_range(op.V, pos2, _AP_W2 - 1); + Range2_all_ones = Range2 == (all_ones >> pos2); + } else if (pos2 < 0) { + Range2_all_ones = false; + } + + if (pos1 >= 0 && pos2 < _AP_W2) { + ap_int_base<_AP_W2, false> Range1; + Range1.V = _AP_ROOT_op_get_range(op.V, pos1, _AP_W2 - 1); + Range1_all_ones = Range1 == (all_ones >> pos1); + Range1_all_zeros = !Range1.V; + } else if (pos2 == _AP_W2) { + Range1_all_ones = lD; + Range1_all_zeros = !lD; + } else if (pos1 < 0) { + Range1_all_zeros = !op.V; + Range1_all_ones = false; + } + + deleted_zeros = + deleted_zeros && (carry ? Range1_all_ones : Range1_all_zeros); + deleted_ones = + carry ? 
Range2_all_ones && (pos1 < 0 || !lD) : Range1_all_ones; + neg_src = isneg && !(carry && Range1_all_ones); + } else + neg_src = isneg && newsignbit; + bool neg_trg = _AP_S && newsignbit; + bool overflow = (neg_trg || !deleted_zeros) && !isneg; + bool underflow = (!neg_trg || !deleted_ones) && neg_src; + if ((_AP_O == AP_SAT_SYM) && _AP_S2 && _AP_S) + underflow |= + neg_src && + (_AP_W > 1 ? _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 2) == 0 + : true); + + overflow_adjust(underflow, overflow, lD, neg_src); + } + return *this; + } // operator= + + template + INLINE ap_fixed_base& operator=( + const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + operator=(const_cast&>(op)); + return *this; + } + + /// Set this ap_fixed_base with ULL. + INLINE ap_fixed_base& setBits(ap_ulong bv) { + // TODO when ull is not be long enough... + Base::V = bv; + return *this; + } + + /// Return a ap_fixed_base object whose this->V is assigned by bv. + static INLINE ap_fixed_base bitsToFixed(ap_ulong bv) { + // TODO fix when ull is not be long enough... + ap_fixed_base t; +#ifdef __SYNTHESIS__ + t.V = bv; +#else + t.V.set_bits(bv); +#endif + return t; + } + + // Explicit conversion functions to ap_int_base. + /** Captures all integer bits, in truncate mode. + * @param[in] Cnative follow conversion from double to int. + */ + INLINE ap_int_base to_ap_int_base( + bool Cnative = true) const { + ap_int_base ret; + if (_AP_I == 0) { + ret.V = 0; + } else if (_AP_I > 0 && _AP_I <= _AP_W) { + ret.V = _AP_ROOT_op_get_range(Base::V, _AP_W - _AP_I, _AP_W - 1); + } else if (_AP_I > _AP_W) { + ret.V = _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 1); + ret.V <<= (_AP_I - _AP_W); + } + /* Consider the following case + * float f = -7.5f; + * ap_fixed<8,4> t = f; // -8 0 0 0 . 0.5 + * int i = t.to_int(); + * the result should be -7 instead of -8. + * Therefore, after truncation, the value should be increated by 1. 
+ * For (-1, 0), carry to MSB will happen, but result 0 is still correct. + */ + if (Cnative && _AP_I < _AP_W) { + // Follow C native data type, conversion from double to int + if (_AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1) && (_AP_I < _AP_W) && + (_AP_ROOT_op_get_range( + Base::V, 0, _AP_I < 0 ? _AP_W - 1 : _AP_W - _AP_I - 1) != 0)) + ++ret; + } else { + // Follow OSCI library, conversion from sc_fixed to sc_int + } + return ret; + }; + + public: + template + INLINE operator ap_int_base<_AP_W2, _AP_S2>() const { + return ap_int_base<_AP_W2, _AP_S2>(to_ap_int_base()); + } + + // Explicit conversion function to C built-in integral type. + INLINE char to_char() const { return to_ap_int_base().to_char(); } + + INLINE int to_int() const { return to_ap_int_base().to_int(); } + + INLINE unsigned to_uint() const { return to_ap_int_base().to_uint(); } + + INLINE ap_slong to_int64() const { return to_ap_int_base().to_int64(); } + + INLINE ap_ulong to_uint64() const { return to_ap_int_base().to_uint64(); } + + /// covert function to double. + /** only round-half-to-even mode supported, does not obey FE env. */ + INLINE double to_double() const { +#if defined(AP_FIXED_ENABLE_CPP_FENV) + _AP_WARNING(std::fegetround() != FE_TONEAREST, + "Only FE_TONEAREST is supported"); +#endif + enum { BITS = DOUBLE_MAN + DOUBLE_EXP + 1 }; + if (!Base::V) return 0.0f; + bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. + ap_int_base<_AP_W, false> tmp; + if (s) + tmp.V = -Base::V; // may truncate one bit extra from neg in sim. + else + tmp.V = Base::V; + int l = tmp.countLeadingZeros(); ///< number of leading zeros. + int e = _AP_I - l - 1 + DOUBLE_BIAS; ///< exponent + int lsb_index = _AP_W - l - 1 - DOUBLE_MAN; + // more than 0.5? + bool a = (lsb_index >=2) ? + (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; + // round to even + a |= (lsb_index >=0) ? 
_AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; + // ull is at least 64-bit + ap_ulong m; + // may actually left shift, ensure buffer is wide enough. + if (_AP_W > BITS) { + m = (lsb_index >= 1) ? (ap_ulong)(tmp.V >> (lsb_index - 1)) + : (ap_ulong)(tmp.V << (1 - lsb_index)); + } else { + m = (ap_ulong)tmp.V; + m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) + : (m << (1 - lsb_index)); + } + m += a; + m >>= 1; + //std::cout << '\n' << std::hex << m << '\n'; // TODO delete this + // carry to MSB, increase exponent + if (_AP_ctype_op_get_bit(m, DOUBLE_MAN + 1)) { + e += 1; + } + // set sign and exponent + m = _AP_ctype_op_set_bit(m, BITS - 1, s); + //std::cout << m << '\n'; // TODO delete this + m = _AP_ctype_op_set_range(m, DOUBLE_MAN, DOUBLE_MAN + DOUBLE_EXP - 1, e); + //std::cout << std::hex << m << std::dec << std::endl; // TODO delete this + // cast to fp + return rawBitsToDouble(m); + } + + /// convert function to float. + /** only round-half-to-even mode supported, does not obey FE env. */ + INLINE float to_float() const { +#if defined(AP_FIXED_ENABLE_CPP_FENV) + _AP_WARNING(std::fegetround() != FE_TONEAREST, + "Only FE_TONEAREST is supported"); +#endif + enum { BITS = FLOAT_MAN + FLOAT_EXP + 1 }; + if (!Base::V) return 0.0f; + bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. + ap_int_base<_AP_W, false> tmp; + if (s) + tmp.V = -Base::V; // may truncate one bit extra from neg in sim. + else + tmp.V = Base::V; + int l = tmp.countLeadingZeros(); ///< number of leading zeros. + int e = _AP_I - l - 1 + FLOAT_BIAS; ///< exponent + int lsb_index = _AP_W - l - 1 - FLOAT_MAN; + // more than 0.5? + bool a = (lsb_index >=2) ? + (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; + // round to even + a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; + // ul is at least 32-bit + unsigned long m; + // may actually left shift, ensure buffer is wide enough. + if (_AP_W > BITS) { + m = (lsb_index >= 1) ? 
(unsigned long)(tmp.V >> (lsb_index - 1)) + : (unsigned long)(tmp.V << (1 - lsb_index)); + } else { + m = (unsigned long)tmp.V; + m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) + : (m << (1 - lsb_index)); + } + m += a; + m >>= 1; + // carry to MSB, increase exponent + if (_AP_ctype_op_get_bit(m, FLOAT_MAN + 1)) { + e += 1; + } + // set sign and exponent + m = _AP_ctype_op_set_bit(m, BITS - 1, s); + m = _AP_ctype_op_set_range(m, FLOAT_MAN, FLOAT_MAN + FLOAT_EXP - 1, e); + // cast to fp + return rawBitsToFloat(m); + } + +#if _AP_ENABLE_HALF_ == 1 + /// convert function to half. + /** only round-half-to-even mode supported, does not obey FE env. */ + INLINE half to_half() const { +#if defined(AP_FIXED_ENABLE_CPP_FENV) + _AP_WARNING(std::fegetround() != FE_TONEAREST, + "Only FE_TONEAREST is supported"); +#endif + enum { BITS = HALF_MAN + HALF_EXP + 1 }; + if (!Base::V) return 0.0f; + bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. + ap_int_base<_AP_W, false> tmp; + if (s) + tmp.V = -Base::V; // may truncate one bit extra from neg in sim. + else + tmp.V = Base::V; + int l = tmp.countLeadingZeros(); ///< number of leading zeros. + int e = _AP_I - l - 1 + HALF_BIAS; ///< exponent + int lsb_index = _AP_W - l - 1 - HALF_MAN; + // more than 0.5? + bool a = (lsb_index >=2) ? + (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; + // round to even + a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; + // short is at least 16-bit + unsigned short m; + // may actually left shift, ensure buffer is wide enough. + if (_AP_W > BITS) { + m = (lsb_index >= 1) ? (unsigned short)(tmp.V >> (lsb_index - 1)) + : (unsigned short)(tmp.V << (1 - lsb_index)); + } else { + m = (unsigned short)tmp.V; + m = (lsb_index >= 1) ? 
(m >> (lsb_index - 1)) + : (m << (1 - lsb_index)); + } + m += a; + m >>= 1; + // carry to MSB, increase exponent + if (_AP_ctype_op_get_bit(m, HALF_MAN + 1)) { + e += 1; + } + // set sign and exponent + m = _AP_ctype_op_set_bit(m, BITS - 1, s); + m = _AP_ctype_op_set_range(m, HALF_MAN, HALF_MAN + HALF_EXP - 1, e); + // cast to fp + return rawBitsToHalf(m); + } +#endif + + // FIXME inherited from old code, this may loose precision! + INLINE operator long double() const { return (long double)to_double(); } + + INLINE operator double() const { return to_double(); } + + INLINE operator float() const { return to_float(); } + +#if _AP_ENABLE_HALF_ == 1 + INLINE operator half() const { return to_half(); } +#endif + + INLINE operator bool() const { return (bool)Base::V != 0; } + + INLINE operator char() const { return (char)to_int(); } + + INLINE operator signed char() const { return (signed char)to_int(); } + + INLINE operator unsigned char() const { return (unsigned char)to_uint(); } + + INLINE operator short() const { return (short)to_int(); } + + INLINE operator unsigned short() const { return (unsigned short)to_uint(); } + + INLINE operator int() const { return to_int(); } + + INLINE operator unsigned int() const { return to_uint(); } + +// FIXME don't assume data width... +#ifdef __x86_64__ + INLINE operator long() const { return (long)to_int64(); } + + INLINE operator unsigned long() const { return (unsigned long)to_uint64(); } +#else + INLINE operator long() const { return (long)to_int(); } + + INLINE operator unsigned long() const { return (unsigned long)to_uint(); } +#endif // ifdef __x86_64__ else + + INLINE operator ap_ulong() const { return to_uint64(); } + + INLINE operator ap_slong() const { return to_int64(); } + + INLINE int length() const { return _AP_W; }; + + // bits_to_int64 deleted. +#ifndef __SYNTHESIS__ + // Used in autowrap, when _AP_W < 64. 
+ INLINE ap_ulong bits_to_uint64() const { + return (Base::V).to_uint64(); + } +#endif + + // Count the number of zeros from the most significant bit + // to the first one bit. Note this is only for ap_fixed_base whose + // _AP_W <= 64, otherwise will incur assertion. + INLINE int countLeadingZeros() { +#ifdef __SYNTHESIS__ + // TODO: used llvm.ctlz intrinsic ? + if (_AP_W <= 32) { + ap_int_base<32, false> t(-1ULL); + t.range(_AP_W - 1, 0) = this->range(0, _AP_W - 1); + return __builtin_ctz(t.V); + } else if (_AP_W <= 64) { + ap_int_base<64, false> t(-1ULL); + t.range(_AP_W - 1, 0) = this->range(0, _AP_W - 1); + return __builtin_ctzll(t.V); + } else { + enum {__N = (_AP_W + 63) / 64}; + int NZeros = 0; + int i = 0; + bool hitNonZero = false; + for (i = 0; i < __N - 1; ++i) { + ap_int_base<64, false> t; + t.range(0, 63) = this->range(_AP_W - i * 64 - 64, _AP_W - i * 64 - 1); + NZeros += hitNonZero ? 0 : __builtin_clzll(t.V); + hitNonZero |= (t != 0); + } + if (!hitNonZero) { + ap_int_base<64, false> t(-1ULL); + t.range(63 - (_AP_W - 1) % 64, 63) = this->range(0, (_AP_W - 1) % 64); + NZeros += __builtin_clzll(t.V); + } + return NZeros; + } +#else + return Base::V.countLeadingZeros(); +#endif + } + + // Arithmetic : Binary + // ------------------------------------------------------------------------- + template + INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::mult operator*( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) + const { + typename RType<_AP_W2, _AP_I2, _AP_S2>::mult_base r, t; + r.V = Base::V; + t.V = op2.V; + r.V *= op2.V; + return r; + } + + // multiply function deleted. + + template + INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::div operator/( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) + const { + typename RType<_AP_W2, _AP_I2, _AP_S2>::div_base r; +#ifndef __SYNTHESIS__ + enum {F2 = _AP_W2-_AP_I2, + _W1=AP_MAX(_AP_W + AP_MAX(F2, 0) + ((_AP_S2 && !_AP_S) ? 
1 : 0), _AP_W2 + ((_AP_S && !_AP_S2) ? 1 : 0))}; + ap_int_base<_W1,_AP_S||_AP_S2> dividend,divisior; + ap_int_base<_W1,_AP_S> tmp1; + ap_int_base<_W1,_AP_S2> tmp2; + tmp1.V = Base::V; + tmp1.V <<= AP_MAX(F2,0); + tmp2.V = op2.V; + dividend = tmp1; + divisior = tmp2; + r.V = ((_AP_S||_AP_S2) ? dividend.V.sdiv(divisior.V): dividend.V.udiv(divisior.V)); +#else + #ifndef __SC_COMPATIBLE__ + ap_fixed_base<_AP_W + AP_MAX(_AP_W2 - _AP_I2, 0),_AP_I, _AP_S> t(*this); + #else + ap_fixed_base<_AP_W + AP_MAX(_AP_W2 - _AP_I2, 0) + AP_MAX(_AP_I2, 0),_AP_I, _AP_S> t(*this); + #endif + r.V = t.V / op2.V; +#endif +/* + enum { + F2 = _AP_W2 - _AP_I2, + shl = AP_MAX(F2, 0) + AP_MAX(_AP_I2, 0), +#ifndef __SC_COMPATIBLE__ + shr = AP_MAX(_AP_I2, 0), +#else + shr = 0, +#endif + W3 = _AP_S2 + _AP_W + shl, + S3 = _AP_S || _AP_S2, + }; + ap_int_base dividend, t; + dividend.V = Base::V; + // multiply both by (1 << F2), and than do integer division. + dividend.V <<= (int) shl; +#ifdef __SYNTHESIS__ + // .V's have right signedness, and will have right extending. + t.V = dividend.V / op2.V; +#else + // XXX op2 may be wider than dividend, and sdiv and udiv takes the same with + // as left hand operand, so data might be truncated by mistake if not + // handled here. + t.V = S3 ? 
dividend.V.sdiv(op2.V) : dividend.V.udiv(op2.V); +#endif + r.V = t.V >> (int) shr; +*/ + return r; + } + +#define OP_BIN_AF(Sym, Rty) \ + template \ + INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::Rty operator Sym( \ + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& \ + op2) const { \ + typename RType<_AP_W2, _AP_I2, _AP_S2>::Rty##_base ret, lhs(*this), \ + rhs(op2); \ + ret.V = lhs.V Sym rhs.V; \ + return ret; \ + } + + OP_BIN_AF(+, plus) + OP_BIN_AF(-, minus) + OP_BIN_AF(&, logic) + OP_BIN_AF(|, logic) + OP_BIN_AF(^, logic) + +// Arithmetic : assign +// ------------------------------------------------------------------------- +#define OP_ASSIGN_AF(Sym) \ + template \ + INLINE ap_fixed_base& operator Sym##=( \ + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& \ + op2) { \ + *this = operator Sym(op2); \ + return *this; \ + } + + OP_ASSIGN_AF(*) + OP_ASSIGN_AF(/) + OP_ASSIGN_AF(+) + OP_ASSIGN_AF(-) + OP_ASSIGN_AF(&) + OP_ASSIGN_AF(|) + OP_ASSIGN_AF(^) + + // Prefix and postfix increment and decrement. + // ------------------------------------------------------------------------- + + /// Prefix increment + INLINE ap_fixed_base& operator++() { + operator+=(ap_fixed_base<_AP_W - _AP_I + 1, 1, false>(1)); + return *this; + } + + /// Prefix decrement. + INLINE ap_fixed_base& operator--() { + operator-=(ap_fixed_base<_AP_W - _AP_I + 1, 1, false>(1)); + return *this; + } + + /// Postfix increment + INLINE const ap_fixed_base operator++(int) { + ap_fixed_base r(*this); + operator++(); + return r; + } + + /// Postfix decrement + INLINE const ap_fixed_base operator--(int) { + ap_fixed_base r(*this); + operator--(); + return r; + } + + // Unary arithmetic. 
+ // ------------------------------------------------------------------------- + INLINE ap_fixed_base operator+() { return *this; } + + INLINE ap_fixed_base<_AP_W + 1, _AP_I + 1, true> operator-() const { + ap_fixed_base<_AP_W + 1, _AP_I + 1, true> r(*this); + r.V = -r.V; + return r; + } + + INLINE ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> getNeg() { + ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> r(*this); + r.V = -r.V; + return r; + } + + // Not (!) + // ------------------------------------------------------------------------- + INLINE bool operator!() const { return Base::V == 0; } + + // Bitwise complement + // ------------------------------------------------------------------------- + // XXX different from Mentor's ac_fixed. + INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S> operator~() const { + ap_fixed_base<_AP_W, _AP_I, _AP_S> r; + r.V = ~Base::V; + return r; + } + + // Shift + // ------------------------------------------------------------------------- + // left shift is the same as moving point right, i.e. increate I. + template + INLINE ap_fixed_base<_AP_W, _AP_I + _AP_SHIFT, _AP_S> lshift() const { + ap_fixed_base<_AP_W, _AP_I + _AP_SHIFT, _AP_S> r; + r.V = Base::V; + return r; + } + + template + INLINE ap_fixed_base<_AP_W, _AP_I - _AP_SHIFT, _AP_S> rshift() const { + ap_fixed_base<_AP_W, _AP_I - _AP_SHIFT, _AP_S> r; + r.V = Base::V; + return r; + } + + // Because the return type is the type of the the first operand, shift assign + // operators do not carry out any quantization or overflow + // While systemc, shift assigns for sc_fixed/sc_ufixed will result in + // quantization or overflow (depending on the mode of the first operand) + INLINE ap_fixed_base operator<<(unsigned int sh) const { + ap_fixed_base r; + r.V = Base::V << sh; +// TODO check shift overflow? 
+#ifdef __SC_COMPATIBLE__ + if (sh == 0) return r; + if (_AP_O != AP_WRAP || _AP_N != 0) { + bool neg_src = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); + bool allones, allzeros; + ap_int_base<_AP_W, false> ones(-1); + if (sh <= _AP_W) { + ap_int_base<_AP_W, false> range1; + range1.V = _AP_ROOT_op_get_range( + const_cast(this)->Base::V, _AP_W - sh, _AP_W - 1); + allones = range1 == (ones >> (_AP_W - sh)); + allzeros = range1 == 0; + } else { + allones = false; + allzeros = Base::V == 0; + } + bool overflow = !allzeros && !neg_src; + bool underflow = !allones && neg_src; + if ((_AP_O == AP_SAT_SYM) && _AP_S) + underflow |= + neg_src && + (_AP_W > 1 ? _AP_ROOT_op_get_range(r.V, 0, _AP_W - 2) == 0 : true); + bool lD = false; + if (sh < _AP_W) lD = _AP_ROOT_op_get_bit(Base::V, _AP_W - sh - 1); + r.overflow_adjust(underflow, overflow, lD, neg_src); + } +#endif + return r; + } + + INLINE ap_fixed_base operator>>(unsigned int sh) const { + ap_fixed_base r; + r.V = Base::V >> sh; +// TODO check shift overflow? +#ifdef __SC_COMPATIBLE__ + if (sh == 0) return r; + if (_AP_Q != AP_TRN) { + bool qb = false; + if (sh <= _AP_W) qb = _AP_ROOT_op_get_bit(Base::V, sh - 1); + bool rb = false; + if (sh > 1 && sh <= _AP_W) + rb = _AP_ROOT_op_get_range(const_cast(this)->Base::V, 0, + sh - 2) != 0; + else if (sh > _AP_W) + rb = Base::V != 0; + r.quantization_adjust(qb, rb, + _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)); + } +#endif + return r; + } + + // left and right shift for int + INLINE ap_fixed_base operator<<(int sh) const { + ap_fixed_base r; + bool isNeg = sh < 0; + unsigned int ush = isNeg ? -sh : sh; + if (isNeg) { + return operator>>(ush); + } else { + return operator<<(ush); + } + } + + INLINE ap_fixed_base operator>>(int sh) const { + bool isNeg = sh < 0; + unsigned int ush = isNeg ? -sh : sh; + if (isNeg) { + return operator<<(ush); + } else { + return operator>>(ush); + } + } + + // left and right shift for ap_int. 
+ template + INLINE ap_fixed_base operator<<(const ap_int_base<_AP_W2, true>& op2) const { + // TODO the code seems not optimal. ap_fixed<8,8> << ap_int<2> needs only a + // small mux, but integer need a big one! + int sh = op2.to_int(); + return operator<<(sh); + } + + template + INLINE ap_fixed_base operator>>(const ap_int_base<_AP_W2, true>& op2) const { + int sh = op2.to_int(); + return operator>>(sh); + } + + // left and right shift for ap_uint. + template + INLINE ap_fixed_base operator<<(const ap_int_base<_AP_W2, false>& op2) const { + unsigned int sh = op2.to_uint(); + return operator<<(sh); + } + + template + INLINE ap_fixed_base operator>>(const ap_int_base<_AP_W2, false>& op2) const { + unsigned int sh = op2.to_uint(); + return operator>>(sh); + } + + // left and right shift for ap_fixed + template + INLINE ap_fixed_base operator<<( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& + op2) { + return operator<<(op2.to_ap_int_base()); + } + + template + INLINE ap_fixed_base operator>>( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& + op2) { + return operator>>(op2.to_ap_int_base()); + } + + // Shift assign. + // ------------------------------------------------------------------------- + + // left shift assign. + INLINE ap_fixed_base& operator<<=(const int sh) { + *this = operator<<(sh); + return *this; + } + + INLINE ap_fixed_base& operator<<=(const unsigned int sh) { + *this = operator<<(sh); + return *this; + } + + template + INLINE ap_fixed_base& operator<<=(const ap_int_base<_AP_W2, _AP_S2>& sh) { + *this = operator<<(sh.to_int()); + return *this; + } + + template + INLINE ap_fixed_base& operator<<=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& + sh) { + *this = operator<<(sh.to_int()); + return *this; + } + + // right shift assign. 
+ INLINE ap_fixed_base& operator>>=(const int sh) { + *this = operator>>(sh); + return *this; + } + + INLINE ap_fixed_base& operator>>=(const unsigned int sh) { + *this = operator>>(sh); + return *this; + } + + template + INLINE ap_fixed_base& operator>>=(const ap_int_base<_AP_W2, _AP_S2>& sh) { + *this = operator>>(sh.to_int()); + return *this; + } + + template + INLINE ap_fixed_base& operator>>=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& + sh) { + *this = operator>>(sh.to_int()); + return *this; + } + +// Comparisons. +// ------------------------------------------------------------------------- +#define OP_CMP_AF(Sym) \ + template \ + INLINE bool operator Sym(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, \ + _AP_O2, _AP_N2>& op2) const { \ + enum { _AP_F = _AP_W - _AP_I, F2 = _AP_W2 - _AP_I2 }; \ + if (_AP_F == F2) \ + return Base::V Sym op2.V; \ + else if (_AP_F > F2) \ + return Base::V Sym ap_fixed_base(op2).V; \ + else \ + return ap_fixed_base(*this).V Sym op2.V; \ + return false; \ + } + + OP_CMP_AF(>) + OP_CMP_AF(<) + OP_CMP_AF(>=) + OP_CMP_AF(<=) + OP_CMP_AF(==) + OP_CMP_AF(!=) +// FIXME: Move compare with double out of struct ap_fixed_base defination +// and combine it with compare operator(double, ap_fixed_base) +#define DOUBLE_CMP_AF(Sym) \ + INLINE bool operator Sym(double d) const { return to_double() Sym d; } + + DOUBLE_CMP_AF(>) + DOUBLE_CMP_AF(<) + DOUBLE_CMP_AF(>=) + DOUBLE_CMP_AF(<=) + DOUBLE_CMP_AF(==) + DOUBLE_CMP_AF(!=) + + // Bit and Slice Select + INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator[]( + unsigned index) { + _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); + return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, index); + } + + template + INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator[]( + const ap_int_base<_AP_W2, _AP_S2>& index) { + _AP_WARNING(index < 0, "Attempting to read bit with negative index"); + _AP_WARNING(index >= 
_AP_W, "Attempting to read bit beyond MSB"); + return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, + index.to_int()); + } + + INLINE bool operator[](unsigned index) const { + _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); + return _AP_ROOT_op_get_bit(const_cast(this)->V, index); + } + + INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> bit( + unsigned index) { + _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); + return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, index); + } + + template + INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> bit( + const ap_int_base<_AP_W2, _AP_S2>& index) { + _AP_WARNING(index < 0, "Attempting to read bit with negative index"); + _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); + return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, + index.to_int()); + } + + INLINE bool bit(unsigned index) const { + _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); + return _AP_ROOT_op_get_bit(const_cast(this)->V, index); + } + + template + INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> get_bit( + const ap_int_base<_AP_W2, true>& index) { + _AP_WARNING(index < _AP_I - _AP_W, + "Attempting to read bit with negative index"); + _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); + return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( + this, index.to_int() + _AP_W - _AP_I); + } + + INLINE bool get_bit(int index) const { + _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); + _AP_WARNING(index < _AP_I - _AP_W, "Attempting to read bit beyond MSB"); + return _AP_ROOT_op_get_bit(const_cast(this)->V, + index + _AP_W - _AP_I); + } +#if 0 + INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> get_bit( + int index) { + _AP_WARNING(index < _AP_I - _AP_W, + "Attempting to read bit with negative index"); + _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); + return 
af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( + this, index + _AP_W - _AP_I); + } +#endif + + template + INLINE bool get_bit(const ap_int_base<_AP_W2, true>& index) const { + _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); + _AP_WARNING(index < _AP_I - _AP_W, "Attempting to read bit beyond MSB"); + return _AP_ROOT_op_get_bit(const_cast(this)->V, + index.to_int() + _AP_W - _AP_I); + } + + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range(int Hi, + int Lo) { + _AP_WARNING((Hi >= _AP_W) || (Lo >= _AP_W), "Out of bounds in range()"); + return af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, Hi, Lo); + } + + // This is a must to strip constness to produce reference type. + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( + int Hi, int Lo) const { + _AP_WARNING((Hi >= _AP_W) || (Lo >= _AP_W), "Out of bounds in range()"); + return af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( + const_cast(this), Hi, Lo); + } + + template + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + template + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range() { + return this->range(_AP_W - 1, 0); + } + + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range() const { + return this->range(_AP_W - 1, 0); + } + + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( + int Hi, int Lo) { + return this->range(Hi, Lo); + } + + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( + int Hi, int Lo) 
const { + return this->range(Hi, Lo); + } + + template + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + template + INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + INLINE bool is_zero() const { return Base::V == 0; } + + INLINE bool is_neg() const { + if (_AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)) return true; + return false; + } + + INLINE int wl() const { return _AP_W; } + + INLINE int iwl() const { return _AP_I; } + + INLINE ap_q_mode q_mode() const { return _AP_Q; } + + INLINE ap_o_mode o_mode() const { return _AP_O; } + + INLINE int n_bits() const { return _AP_N; } + + // print a string representation of this number in the given radix. + // Radix support is 2, 8, 10, or 16. + // The result will include a prefix indicating the radix, except for decimal, + // where no prefix is needed. The default is to output a signed representation + // of signed numbers, or an unsigned representation of unsigned numbers. For + // non-decimal formats, this can be changed by the 'sign' argument. +#ifndef __SYNTHESIS__ + std::string to_string(unsigned char radix = 2, bool sign = _AP_S) const { + // XXX in autosim/autowrap.tcl "(${name}).to_string(2).c_str()" is used to + // initialize sc_lv, which seems incapable of handling format "-0b". + if (radix == 2) sign = false; + + std::string str; + str.clear(); + char step = 0; + bool isNeg = sign && (Base::V < 0); + + // Extend to take care of the -MAX case. 
+ ap_fixed_base<_AP_W + 1, _AP_I + 1> tmp(*this); + if (isNeg) { + tmp = -tmp; + str += '-'; + } + std::string prefix; + switch (radix) { + case 2: + prefix = "0b"; + step = 1; + break; + case 8: + prefix = "0o"; + step = 3; + break; + case 16: + prefix = "0x"; + step = 4; + break; + default: + break; + } + + if (_AP_I > 0) { + // Note we drop the quantization and rounding flags here. The + // integer part is always in range, and the fractional part we + // want to drop. Also, the number is always positive, because + // of the absolute value above. + ap_int_base int_part; + // [1] [ I ] d [ W - I ] + // | | | + // | W-I 0 + // W + int_part.V = _AP_ROOT_op_get_range( + tmp.V, _AP_W - _AP_I, _AP_W); + str += int_part.to_string(radix, false); + } else { + str += prefix; + str += '0'; + } + + ap_fixed_base frac_part = tmp; + + if (radix == 10) { + if (frac_part != 0) { + str += "."; + while (frac_part != 0) { + char digit = (frac_part * radix).to_char(); + str += static_cast(digit + '0'); + frac_part *= radix; + } + } + } else { + if (frac_part != 0) { + str += "."; + for (signed i = _AP_W - _AP_I - 1; i >= 0; i -= step) { + char digit = frac_part.range(i, AP_MAX(0, i - step + 1)).to_char(); + // If we have a partial bit pattern at the end, then we need + // to put it in the high-order bits of 'digit'. + int offset = AP_MIN(0, i - step + 1); + digit <<= -offset; + str += digit < 10 ? static_cast(digit + '0') + : static_cast(digit - 10 + 'a'); + } + if (radix == 16) + str += "p0"; // C99 Hex constants are required to have an exponent. + } + } + return str; + } +#else + // XXX HLS will delete this in synthesis + INLINE char* to_string(unsigned char radix = 2, bool sign = _AP_S) const { + return 0; + } +#endif +}; // struct ap_fixed_base. 
+ +template +INLINE void b_not( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { + ret.V = ~op.V; +} + +template +INLINE void b_and( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + ret.V = op1.V & op2.V; +} + +template +INLINE void b_or( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + ret.V = op1.V | op2.V; +} + +template +INLINE void b_xor( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + ret.V = op1.V ^ op2.V; +} + +template +INLINE void neg( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + ap_fixed_base<_AP_W2 + !_AP_S2, _AP_I2 + !_AP_S2, true, _AP_Q2, _AP_O2, + _AP_N2> + t; + t.V = -op.V; + ret = t; +} + +template +INLINE void lshift( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op, + int i) { + enum { + F2 = _AP_W2 - _AP_I2, + _AP_I3 = AP_MAX(_AP_I, _AP_I2), + _AP_W3 = _AP_I3 + F2, + }; + // wide buffer + ap_fixed_base<_AP_W3, _AP_I3, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> t; + t.V = op.V; + t.V <<= i; // FIXME overflow? 
+ // handle quantization and overflow + ret = t; +} + +template +INLINE void rshift( + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op, + int i) { + enum { + F = _AP_W - _AP_I, + F2 = _AP_W2 - _AP_I2, + F3 = AP_MAX(F, F2), + _AP_W3 = _AP_I2 + F3, + sh = F - F2, + }; + // wide buffer + ap_fixed_base<_AP_W3, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> t; + t.V = op.V; + if (sh >= 0) + t.V <<= (int) sh; + t.V >>= i; + // handle quantization and overflow + ret = t; +} + +//// FIXME +//// These partial specialization ctors allow code like +//// char c = 'a'; +//// ap_fixed_base<8, 8, true> x(c); +//// but what bout ap_fixed_base<9, 9, true> y(c) ? +// + +#ifndef __SYNTHESIS__ +INLINE std::string scientificFormat(std::string& input) { + if (input.length() == 0) return input; + + size_t decPosition = input.find('.'); + if (decPosition == std::string::npos) decPosition = input.length(); + + size_t firstNonZeroPos = 0; + for (; input[firstNonZeroPos] > '9' || input[firstNonZeroPos] < '1'; + firstNonZeroPos++) + ; + + int exp; + if (firstNonZeroPos > decPosition) + exp = decPosition - firstNonZeroPos; + else + exp = decPosition - firstNonZeroPos - 1; + std::string expString = ""; + if (exp == 0) + ; + else if (exp < 0) { + expString += "e-"; + exp = -exp; + } else + expString += "e+"; + + if (exp < 10 && exp > 0) { + expString += '0'; + expString += (char)('0' + exp); + } else if (exp != 0) { + std::string tmp; + + std::ostringstream oss; + oss << exp; + + tmp = oss.str(); + expString += tmp; + } + + int lastNonZeroPos = (int)(input.length() - 1); + for (; lastNonZeroPos >= 0; --lastNonZeroPos) + if (input[lastNonZeroPos] <= '9' && input[lastNonZeroPos] > '0') break; + + std::string ans = ""; + ans += input[firstNonZeroPos]; + if (firstNonZeroPos != (size_t)lastNonZeroPos) { + ans += '.'; + for (int i = firstNonZeroPos + 1; i <= lastNonZeroPos; i++) + if (input[i] != '.') ans += input[i]; + } 
+ + ans += expString; + return ans; +} + +INLINE std::string reduceToPrecision(std::string& input, int precision) { + bool isZero = true; + size_t inputLen = input.length(); + for (size_t i = 0; i < inputLen && isZero; i++) + if (input[i] != '.' && input[i] != '0') isZero = false; + if (isZero) return "0"; + + // Find the first valid number, skip '-' + int FirstNonZeroPos = 0; + int LastNonZeroPos = (int)inputLen - 1; + int truncBitPosition = 0; + size_t decPosition = input.find('.'); + for (; input[FirstNonZeroPos] < '1' || input[FirstNonZeroPos] > '9'; + FirstNonZeroPos++) + ; + + for (; input[LastNonZeroPos] < '1' || input[LastNonZeroPos] > '9'; + LastNonZeroPos--) + ; + + if (decPosition == std::string::npos) decPosition = inputLen; + // Count the valid number, to decide whether we need to truncate + if ((int)decPosition > LastNonZeroPos) { + if (LastNonZeroPos - FirstNonZeroPos + 1 <= precision) return input; + truncBitPosition = FirstNonZeroPos + precision; + } else if ((int)decPosition < FirstNonZeroPos) { // This is pure decimal + if (LastNonZeroPos - FirstNonZeroPos + 1 <= precision) { + if (FirstNonZeroPos - decPosition - 1 < 4) { + return input; + } else { + if (input[0] == '-') { + std::string tmp = input.substr(1, inputLen - 1); + return std::string("-") + scientificFormat(tmp); + } else + return scientificFormat(input); + } + } + truncBitPosition = FirstNonZeroPos + precision; + } else { + if (LastNonZeroPos - FirstNonZeroPos <= precision) return input; + truncBitPosition = FirstNonZeroPos + precision + 1; + } + + // duplicate the input string, we want to add "0" before the valid numbers + // This is easy for quantization, since we may change 9999 to 10000 + std::string ans = ""; + std::string dupInput = "0"; + if (input[0] == '-') { + ans += '-'; + dupInput += input.substr(1, inputLen - 1); + } else { + dupInput += input.substr(0, inputLen); + ++truncBitPosition; + } + + // Add 'carry' after truncation, if necessary + bool carry = 
dupInput[truncBitPosition] > '4'; + for (int i = truncBitPosition - 1; i >= 0 && carry; i--) { + if (dupInput[i] == '.') continue; + if (dupInput[i] == '9') + dupInput[i] = '0'; + else { + ++dupInput[i]; + carry = false; + } + } + + // bits outside precision range should be set to 0 + if (dupInput[0] == '1') + FirstNonZeroPos = 0; + else { + FirstNonZeroPos = 0; + while (dupInput[FirstNonZeroPos] < '1' || dupInput[FirstNonZeroPos] > '9') + ++FirstNonZeroPos; + } + + unsigned it = FirstNonZeroPos; + int NValidNumber = 0; + while (it < dupInput.length()) { + if (dupInput[it] == '.') { + ++it; + continue; + } + ++NValidNumber; + if (NValidNumber > precision) dupInput[it] = '0'; + ++it; + } + + // Here we wanted to adjust the truncate position and the value + decPosition = dupInput.find('.'); + if (decPosition == std::string::npos) // When this is integer + truncBitPosition = (int)dupInput.length(); + else + for (truncBitPosition = (int)(dupInput.length() - 1); truncBitPosition >= 0; + --truncBitPosition) { + if (dupInput[truncBitPosition] == '.') break; + if (dupInput[truncBitPosition] != '0') { + truncBitPosition++; + break; + } + } + + if (dupInput[0] == '1') + dupInput = dupInput.substr(0, truncBitPosition); + else + dupInput = dupInput.substr(1, truncBitPosition - 1); + + decPosition = dupInput.find('.'); + if (decPosition != std::string::npos) { + size_t it = 0; + for (it = decPosition + 1; dupInput[it] == '0'; it++) + ; + if (it - decPosition - 1 < 4) { + ans += dupInput; + return ans; + } else { + ans += scientificFormat(dupInput); + return ans; + } + } else if ((int)(dupInput.length()) <= precision) { + ans += dupInput; + return ans; + } + + ans += scientificFormat(dupInput); + return ans; +} + +template +INLINE void print( + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { + if (_AP_I > 0) { + ap_int_base<_AP_I, _AP_S> p1; + p1.V = x.V >> (_AP_W - _AP_I); + print(p1.V); // print overlaod for .V should exit + } else { + printf("0"); + } + 
printf("."); + if (_AP_I < _AP_W) { + ap_int_base<_AP_W - _AP_I, false> p2; + p2.V = _AP_ROOT_op_get_range(x.V, 0, _AP_W - _AP_I); + print(p2.V, false); // print overlaod for .V should exit + } +} +#endif // ifndef __SYNTHESIS__ + +// XXX the following two functions have to exist in synthesis, +// as some old HLS Video Library code uses the ostream overload, +// although HLS will later delete I/O function call. + +/// Output streaming +//----------------------------------------------------------------------------- +// XXX apcc cannot handle global std::ios_base::Init() brought in by +#ifndef AP_AUTOCC +#ifndef __SYNTHESIS__ +template +INLINE std::ostream& operator<<( + std::ostream& out, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { + // TODO support std::ios_base::fmtflags + unsigned width = out.width(); + unsigned precision = out.precision(); + char fill = out.fill(); + std::string str = x.to_string(10, _AP_S); + str = reduceToPrecision(str, precision); + if (width > str.length()) { + for (unsigned i = 0; i < width - str.length(); ++i) + out << fill; + } + out << str; + return out; +} +#endif // ifndef __SYNTHESIS__ + +/// Input streaming +// ----------------------------------------------------------------------------- +#ifndef __SYNTHESIS__ +template +INLINE std::istream& operator>>( + std::istream& in, + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { + double d; + in >> d; + x = ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(d); + return in; +} +#endif +#endif // ifndef AP_AUTOCC + +/// Operators mixing Integers with ap_fixed_base +// ----------------------------------------------------------------------------- +#define AF_BIN_OP_WITH_INT_SF(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ + template \ + INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ + _AP_W2, _AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP( \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE i_op) { \ + 
return op.operator BIN_OP(ap_int_base<_AP_W2, _AP_S2>(i_op)); \ + } + +#define AF_BIN_OP_WITH_INT(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ + template \ + INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ + _AP_W2, _AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP( \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE i_op) { \ + return op.operator BIN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ + } \ + template \ + INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ + _AP_W2, _AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP( \ + C_TYPE i_op, \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator BIN_OP(op); \ + } + +#define AF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE bool operator REL_OP( \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE i_op) { \ + return op.operator REL_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ + } \ + template \ + INLINE bool operator REL_OP( \ + C_TYPE i_op, \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator REL_OP(op); \ + } + +#define AF_ASSIGN_OP_WITH_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ + operator ASSIGN_OP( \ + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE i_op) { \ + return op.operator ASSIGN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ + } + +#define AF_ASSIGN_OP_WITH_INT_SF(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ + operator ASSIGN_OP( \ + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE i_op) { \ + return op.operator ASSIGN_OP(ap_int_base<_AP_W2, _AP_S2>(i_op)); \ + } + +#define ALL_AF_OP_WITH_INT(C_TYPE, BITS, SIGN) \ + 
AF_BIN_OP_WITH_INT(+, C_TYPE, (BITS), (SIGN), plus) \ + AF_BIN_OP_WITH_INT(-, C_TYPE, (BITS), (SIGN), minus) \ + AF_BIN_OP_WITH_INT(*, C_TYPE, (BITS), (SIGN), mult) \ + AF_BIN_OP_WITH_INT(/, C_TYPE, (BITS), (SIGN), div) \ + AF_BIN_OP_WITH_INT(&, C_TYPE, (BITS), (SIGN), logic) \ + AF_BIN_OP_WITH_INT(|, C_TYPE, (BITS), (SIGN), logic) \ + AF_BIN_OP_WITH_INT(^, C_TYPE, (BITS), (SIGN), logic) \ + AF_BIN_OP_WITH_INT_SF(>>, C_TYPE, (BITS), (SIGN), lhs) \ + AF_BIN_OP_WITH_INT_SF(<<, C_TYPE, (BITS), (SIGN), lhs) \ + \ + AF_ASSIGN_OP_WITH_INT(+=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT(-=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT(*=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT(/=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT(&=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT(|=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT(^=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT_SF(>>=, C_TYPE, (BITS), (SIGN)) \ + AF_ASSIGN_OP_WITH_INT_SF(<<=, C_TYPE, (BITS), (SIGN)) \ + \ + AF_REL_OP_WITH_INT(>, C_TYPE, (BITS), (SIGN)) \ + AF_REL_OP_WITH_INT(<, C_TYPE, (BITS), (SIGN)) \ + AF_REL_OP_WITH_INT(>=, C_TYPE, (BITS), (SIGN)) \ + AF_REL_OP_WITH_INT(<=, C_TYPE, (BITS), (SIGN)) \ + AF_REL_OP_WITH_INT(==, C_TYPE, (BITS), (SIGN)) \ + AF_REL_OP_WITH_INT(!=, C_TYPE, (BITS), (SIGN)) + +ALL_AF_OP_WITH_INT(bool, 1, false) +ALL_AF_OP_WITH_INT(char, 8, CHAR_IS_SIGNED) +ALL_AF_OP_WITH_INT(signed char, 8, true) +ALL_AF_OP_WITH_INT(unsigned char, 8, false) +ALL_AF_OP_WITH_INT(short, _AP_SIZE_short, true) +ALL_AF_OP_WITH_INT(unsigned short, _AP_SIZE_short, false) +ALL_AF_OP_WITH_INT(int, _AP_SIZE_int, true) +ALL_AF_OP_WITH_INT(unsigned int, _AP_SIZE_int, false) +ALL_AF_OP_WITH_INT(long, _AP_SIZE_long, true) +ALL_AF_OP_WITH_INT(unsigned long, _AP_SIZE_long, false) +ALL_AF_OP_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) +ALL_AF_OP_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef ALL_AF_OP_WITH_INT +#undef AF_BIN_OP_WITH_INT +#undef AF_BIN_OP_WITH_INT_SF 
+#undef AF_ASSIGN_OP_WITH_INT +#undef AF_ASSIGN_OP_WITH_INT_SF +#undef AF_REL_OP_WITH_INT + +/* + * ********************************************************************** + * TODO + * There is no operator defined with float/double/long double, so that + * code like + * ap_fixed<8,4> a = 1.5f; + * a += 0.5f; + * will fail in compilation. + * Operator with warning about conversion might be wanted. + * ********************************************************************** + */ + +#define AF_BIN_OP_WITH_AP_INT(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>::template RType< \ + _AP_W, _AP_I, _AP_S>::RTYPE \ + operator BIN_OP( \ + const ap_int_base<_AP_W2, _AP_S2>& i_op, \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator BIN_OP(op); \ + } \ + \ + template \ + INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ + _AP_W2, _AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP( \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ + return op.operator BIN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ + } + +#define AF_REL_OP_WITH_AP_INT(REL_OP) \ + template \ + INLINE bool operator REL_OP( \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ + return op.operator REL_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ + } \ + \ + template \ + INLINE bool operator REL_OP( \ + const ap_int_base<_AP_W2, _AP_S2>& i_op, \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator REL_OP(op); \ + } + +#define AF_ASSIGN_OP_WITH_AP_INT(ASSIGN_OP) \ + template \ + INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ + operator ASSIGN_OP( \ + ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + const ap_int_base<_AP_W2, 
_AP_S2>& i_op) { \ + return op.operator ASSIGN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ + } \ + \ + template \ + INLINE ap_int_base<_AP_W2, _AP_S2>& operator ASSIGN_OP( \ + ap_int_base<_AP_W2, _AP_S2>& i_op, \ + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return i_op.operator ASSIGN_OP(op.to_ap_int_base()); \ + } + +AF_BIN_OP_WITH_AP_INT(+, plus) +AF_BIN_OP_WITH_AP_INT(-, minus) +AF_BIN_OP_WITH_AP_INT(*, mult) +AF_BIN_OP_WITH_AP_INT(/, div) +AF_BIN_OP_WITH_AP_INT(&, logic) +AF_BIN_OP_WITH_AP_INT(|, logic) +AF_BIN_OP_WITH_AP_INT(^, logic) + +#undef AF_BIN_OP_WITH_AP_INT + +AF_ASSIGN_OP_WITH_AP_INT(+=) +AF_ASSIGN_OP_WITH_AP_INT(-=) +AF_ASSIGN_OP_WITH_AP_INT(*=) +AF_ASSIGN_OP_WITH_AP_INT(/=) +AF_ASSIGN_OP_WITH_AP_INT(&=) +AF_ASSIGN_OP_WITH_AP_INT(|=) +AF_ASSIGN_OP_WITH_AP_INT(^=) + +#undef AF_ASSIGN_OP_WITH_AP_INT + +AF_REL_OP_WITH_AP_INT(==) +AF_REL_OP_WITH_AP_INT(!=) +AF_REL_OP_WITH_AP_INT(>) +AF_REL_OP_WITH_AP_INT(>=) +AF_REL_OP_WITH_AP_INT(<) +AF_REL_OP_WITH_AP_INT(<=) + +#undef AF_REL_OP_WITH_AP_INT + +// Relational Operators with double +template +INLINE bool operator==( + double op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + return op2.operator==(op1); +} + +template +INLINE bool operator!=( + double op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + return op2.operator!=(op1); +} + +template +INLINE bool operator>( + double op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + return op2.operator<(op1); +} + +template +INLINE bool operator>=( + double op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + return op2.operator<=(op1); +} + +template +INLINE bool operator<( + double op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + return op2.operator>(op1); +} + +template +INLINE bool operator<=( + double op1, + const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { + 
return op2.operator>=(op1); +} + +#endif // ifndef __cplusplus else + +#endif // ifndef __AP_FIXED_BASE_H__ else + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_fixed_ref.h b/TrigScint/include/TrigScint/ap_fixed_ref.h new file mode 100644 index 000000000..aefda0a67 --- /dev/null +++ b/TrigScint/include/TrigScint/ap_fixed_ref.h @@ -0,0 +1,718 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_FIXED_REF_H__ +#define __AP_FIXED_REF_H__ + +#ifndef __AP_FIXED_H__ +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." 
+#endif + +#ifndef __cplusplus +#error "C++ is required to include this header file" + +#else +#ifndef __SYNTHESIS__ +#include +#endif +/// Proxy class, which allows bit selection to be used as both rvalue (for +/// reading) and lvalue (for writing) +template +struct af_bit_ref { +#ifdef _MSC_VER +#pragma warning(disable : 4521 4522) +#endif + typedef ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> ref_type; + ref_type& d_bv; + int d_index; + + public: + INLINE af_bit_ref( + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ref) + : d_bv(ref.d_bv), d_index(ref.d_index) { +#ifndef __SYNTHESIS__ + _AP_WARNING(d_index < 0, "Index of bit vector (%d) cannot be negative.", + d_index); + _AP_WARNING(d_index >= _AP_W, "Index of bit vector (%d) out of range (%d).", + d_index, _AP_W); +#endif + } + + INLINE af_bit_ref(ref_type* bv, int index = 0) : d_bv(*bv), d_index(index) {} + + INLINE af_bit_ref(const ref_type* bv, int index = 0) + : d_bv(*const_cast(bv)), d_index(index) {} + + /// convert operators. + INLINE operator bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } + + /// @name assign operators + // @{ + INLINE af_bit_ref& operator=(bool val) { + d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); + return *this; + } + + // Be explicit to prevent it from being deleted, as field d_bv + // is of reference type. 
+ INLINE af_bit_ref& operator=(const af_bit_ref& val) { + return operator=(bool(val)); + } + + template + INLINE af_bit_ref& operator=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=(bool(val)); + } + + template + INLINE af_bit_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { + return operator=(bool(val)); + } + + template + INLINE af_bit_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { + return operator=(val != 0); + } + + template + INLINE af_bit_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { + return operator=(ap_int_base<_AP_W2, false>(val)); + } + + template + INLINE af_bit_ref& operator=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=(ap_int_base<_AP_W2, false>(val)); + } + + template + INLINE af_bit_ref& operator=( + const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { + return operator=(ap_int_base<_AP_W2 + _AP_W3, false>(val)); + } + // @} + + /// @name concatenate operators + // @{ + template + INLINE ap_concat_ref<1, af_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(ap_int_base<_AP_W2, _AP_S2> &op) { + return ap_concat_ref<1, af_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( + *this, op); + } + + template + INLINE ap_concat_ref<1, af_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > operator,( + const ap_bit_ref<_AP_W2, _AP_S2> &op) { + return ap_concat_ref<1, af_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >(*this, + op); + } + + template + INLINE ap_concat_ref<1, af_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > + operator,(const ap_range_ref<_AP_W2, _AP_S2> &op) { + return ap_concat_ref<1, af_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> >( + *this, op); + } + + template + INLINE ap_concat_ref<1, af_bit_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > + operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &op) { + return ap_concat_ref<1, af_bit_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, 
_AP_T2, _AP_W3, _AP_T3> >(*this, + op); + } + + template + INLINE ap_concat_ref< + 1, af_bit_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { + return ap_concat_ref< + 1, af_bit_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, + op); + } + + template + INLINE ap_concat_ref<1, af_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, + _AP_Q2, _AP_O2, _AP_N2> > + operator,( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { + return ap_concat_ref<1, af_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, + _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast&>( + op)); + } + // @} + + /// @name comparison + // @{ + template + INLINE bool operator==( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + return get() == op.get(); + } + + template + INLINE bool operator!=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + return get() != op.get(); + } + // @} + + INLINE bool operator~() const { + bool bit = _AP_ROOT_op_get_bit(d_bv.V, d_index); + return bit ? false : true; + } + + INLINE bool get() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } + + INLINE int length() const { return 1; } + +#ifndef __SYNTHESIS__ + std::string to_string() const { return get() ? "1" : "0"; } +#else + // XXX HLS will delete this in synthesis + INLINE char* to_string() const { return 0; } +#endif +}; // struct af_bit_ref + +// XXX apcc cannot handle global std::ios_base::Init() brought in by +#ifndef AP_AUTOCC +#ifndef __SYNTHESIS__ +template +INLINE std::ostream& operator<<( + std::ostream& os, + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { + os << x.to_string(); + return os; +} +#endif // ifndef __SYNTHESIS__ +#endif // ifndef AP_AUTOCC + +/// Range (slice) reference. 
+template +struct af_range_ref { +#ifdef _MSC_VER +#pragma warning(disable : 4521 4522) +#endif + typedef ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> ref_type; + ref_type& d_bv; + int l_index; + int h_index; + + public: + /// copy ctor + INLINE af_range_ref( + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ref) + : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} + + /// ctor from ap_fixed_base, higher and lower bound. + /** if h is less than l, the bits selected will be returned in reverse order. + */ + INLINE af_range_ref(ref_type* bv, int h, int l) + : d_bv(*bv), l_index(l), h_index(h) { +#ifndef __SYNTHESIS__ + _AP_WARNING(h < 0 || l < 0, + "Higher bound(%d) and lower(%d) bound cannot be negative.", h, + l); + _AP_WARNING(h >= _AP_W || l >= _AP_W, + "Higher bound(%d) or lower(%d) bound out of range.", h, l); + _AP_WARNING(h < l, "The bits selected will be returned in reverse order."); +#endif + } + + INLINE af_range_ref(const ref_type* bv, int h, int l) + : d_bv(*const_cast(bv)), l_index(l), h_index(h) { +#ifndef __SYNTHESIS__ + _AP_WARNING(h < 0 || l < 0, + "Higher bound(%d) and lower(%d) bound cannot be negative.", h, + l); + _AP_WARNING(h >= _AP_W || l >= _AP_W, + "Higher bound(%d) or lower(%d) bound out of range.", h, l); + _AP_WARNING(h < l, "The bits selected will be returned in reverse order."); +#endif + } + + /// @name assign operators + // @{ + +#define ASSIGN_CTYPE_TO_AF_RANGE(DATA_TYPE) \ + INLINE af_range_ref& operator=(const DATA_TYPE val) { \ + ap_int_base<_AP_W, false> loc(val); \ + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, loc.V); \ + return *this; \ + } + + ASSIGN_CTYPE_TO_AF_RANGE(bool) + ASSIGN_CTYPE_TO_AF_RANGE(char) + ASSIGN_CTYPE_TO_AF_RANGE(signed char) + ASSIGN_CTYPE_TO_AF_RANGE(unsigned char) + ASSIGN_CTYPE_TO_AF_RANGE(short) + ASSIGN_CTYPE_TO_AF_RANGE(unsigned short) + ASSIGN_CTYPE_TO_AF_RANGE(int) + ASSIGN_CTYPE_TO_AF_RANGE(unsigned int) + ASSIGN_CTYPE_TO_AF_RANGE(long) + 
ASSIGN_CTYPE_TO_AF_RANGE(unsigned long) + ASSIGN_CTYPE_TO_AF_RANGE(ap_slong) + ASSIGN_CTYPE_TO_AF_RANGE(ap_ulong) +#if _AP_ENABLE_HALF_ == 1 + ASSIGN_CTYPE_TO_AF_RANGE(half) +#endif + ASSIGN_CTYPE_TO_AF_RANGE(float) + ASSIGN_CTYPE_TO_AF_RANGE(double) +#undef ASSIGN_CTYPE_TO_AF_RANGE + + /// assgin using a string. XXX crucial for cosim. + INLINE af_range_ref& operator=(const char* val) { + const ap_int_base<_AP_W, false> tmp(val); // XXX figure out radix + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); + return *this; + } + + /// assign from ap_int_base. + // NOTE Base of other assgin operators. + template + INLINE af_range_ref& operator=(const ap_int_base<_AP_W3, _AP_S3>& val) { + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); + return *this; + } + + /// assign from range reference to ap_int_base. + template + INLINE af_range_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { + const ap_int_base<_AP_W2, false> tmp(val); + return operator=(tmp); + } + + /// assign from bit reference to ap_int_base.. + template + INLINE af_range_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { + const ap_int_base<1, false> tmp((bool)val); + return operator=(tmp); + } + + /// assgin from ap_fixed_base. + template + INLINE af_range_ref& operator=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& + val) { + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); + return *this; + } + + /// copy assgin. + // XXX This has to be explicit, otherwise it will be deleted, as d_bv is + // of reference type. + INLINE af_range_ref& operator=(const af_range_ref& val) { + ap_int_base<_AP_W, false> tmp(val); + return operator=(tmp); + } + + /// assign from range reference to ap_fixed_base. 
+ template + INLINE af_range_ref& operator=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + ap_int_base<_AP_W2, false> tmp(val); + return operator=(tmp); + } + + /// assign from bit reference to ap_fixed_base. + template + INLINE af_range_ref& operator=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + ap_int_base<1, false> tmp((bool)val); + return operator=(tmp); + } + + /// assign from compound reference. + template + INLINE af_range_ref& operator=( + const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { + const ap_int_base<_AP_W2 + _AP_W3, false> tmp(val); + return operator=(tmp); + } + // @} + + /// @name comparison operators with ap_range_ref. + // @{ + template + INLINE bool operator==(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> rop(op2); + return lop == rop; + } + + template + INLINE bool operator!=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return !(operator==(op2)); + } + + template + INLINE bool operator<(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> rop(op2); + return lop < rop; + } + + template + INLINE bool operator>(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> rop(op2); + return lop > rop; + } + + template + INLINE bool operator<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return !(operator>(op2)); + } + + template + INLINE bool operator>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return !(operator<(op2)); + } + // @} + + /// @name comparison operators with af_range_ref. 
+ // @{ + template + INLINE bool operator==( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> rop(op2); + return lop == rop; + } + + template + INLINE bool operator!=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { + return !(operator==(op2)); + } + + template + INLINE bool operator<( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> rop(op2); + return lop < rop; + } + + template + INLINE bool operator>( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> rop(op2); + return lop > rop; + } + + template + INLINE bool operator<=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { + return !(operator>(op2)); + } + + template + INLINE bool operator>=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { + return !(operator<(op2)); + } + // @} + + /// @name concatenate operators. + /// @{ + /// concatenate with ap_int_base. + template + INLINE + ap_concat_ref<_AP_W, af_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(ap_int_base<_AP_W2, _AP_S2> &op) { + return ap_concat_ref<_AP_W, af_range_ref, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >(*this, op); + } + + /// concatenate with ap_bit_ref. + template + INLINE ap_concat_ref<_AP_W, af_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > + operator,(const ap_bit_ref<_AP_W2, _AP_S2> &op) { + return ap_concat_ref<_AP_W, af_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( + *this, const_cast&>(op)); + } + + /// concatenate with ap_bit_ref. 
+ template + INLINE ap_concat_ref<_AP_W, af_range_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > + operator,(const ap_range_ref<_AP_W2, _AP_S2> &op) { + return ap_concat_ref<_AP_W, af_range_ref, _AP_W2, + ap_range_ref<_AP_W2, _AP_S2> >( + *this, const_cast&>(op)); + } + + /// concatenate with ap_concat_ref. + template + INLINE ap_concat_ref<_AP_W, af_range_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > + operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &op) { + return ap_concat_ref<_AP_W, af_range_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( + *this, const_cast&>(op)); + } + + /// concatenate with another af_range_ref. + template + INLINE + ap_concat_ref<_AP_W, af_range_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> + &op) { + return ap_concat_ref< + _AP_W, af_range_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast&>( + op)); + } + + /// concatenate with another af_bit_ref. 
+ template + INLINE + ap_concat_ref<_AP_W, af_range_ref, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { + return ap_concat_ref< + _AP_W, af_range_ref, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast&>( + op)); + } + // @} + + INLINE operator ap_ulong() const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret.to_uint64(); + } + + INLINE operator ap_int_base<_AP_W, false>() const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret; + } + + INLINE ap_int_base<_AP_W, false> to_ap_int_base() const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret; + } + + // used in ap_fixed_base::to_string() + INLINE char to_char() const { + return (char)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE int to_int() const { + return (int)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE unsigned to_uint() const { + return (unsigned)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE long to_long() const { + return (long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE unsigned long to_ulong() const { + return (unsigned long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE ap_slong to_int64() const { + return (ap_slong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE ap_ulong to_uint64() const { + return (ap_ulong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE ap_int_base<_AP_W, false> get() const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret; + } + + template + INLINE void set(const ap_int_base<_AP_W2, false>& val) { + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); + } + + INLINE int 
length() const { + return h_index >= l_index ? h_index - l_index + 1 : l_index - h_index + 1; + } + +#ifndef __SYNTHESIS__ + std::string to_string(signed char rd = 2) const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret.to_string(rd); + } +#else + // XXX HLS will delete this in synthesis + INLINE char* to_string(signed char rd = 2) const { + return 0; + } +#endif +}; // struct af_range_ref + +// XXX apcc cannot handle global std::ios_base::Init() brought in by +#ifndef AP_AUTOCC +#ifndef __SYNTHESIS__ +template +INLINE std::ostream& operator<<( + std::ostream& os, + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { + os << x.to_string(); + return os; +} +#endif +#endif // ifndef AP_AUTOCC + +#define AF_REF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE bool operator REL_OP( \ + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE op2) { \ + return ap_int_base<_AP_W, false>(op) \ + REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ + } \ + \ + template \ + INLINE bool operator REL_OP( \ + C_TYPE op2, \ + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return ap_int_base<_AP_W2, _AP_S2>(op2) \ + REL_OP ap_int_base<_AP_W, false>(op); \ + } \ + \ + template \ + INLINE bool operator REL_OP( \ + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + C_TYPE op2) { \ + return bool(op) REL_OP op2; \ + } \ + \ + template \ + INLINE bool operator REL_OP( \ + C_TYPE op2, \ + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return op2 REL_OP bool(op); \ + } + +#define AF_REF_REL_OPS_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ + AF_REF_REL_OP_WITH_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ + AF_REF_REL_OP_WITH_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ + AF_REF_REL_OP_WITH_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ + AF_REF_REL_OP_WITH_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ + AF_REF_REL_OP_WITH_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ 
+ AF_REF_REL_OP_WITH_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) + +AF_REF_REL_OPS_WITH_INT(bool, 1, false) +AF_REF_REL_OPS_WITH_INT(char, 8, CHAR_IS_SIGNED) +AF_REF_REL_OPS_WITH_INT(signed char, 8, true) +AF_REF_REL_OPS_WITH_INT(unsigned char, 8, false) +AF_REF_REL_OPS_WITH_INT(short, _AP_SIZE_short, true) +AF_REF_REL_OPS_WITH_INT(unsigned short, _AP_SIZE_short, false) +AF_REF_REL_OPS_WITH_INT(int, _AP_SIZE_int, true) +AF_REF_REL_OPS_WITH_INT(unsigned int, _AP_SIZE_int, false) +AF_REF_REL_OPS_WITH_INT(long, _AP_SIZE_long, true) +AF_REF_REL_OPS_WITH_INT(unsigned long, _AP_SIZE_long, false) +AF_REF_REL_OPS_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) +AF_REF_REL_OPS_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef AF_REF_REL_OP_INT +#undef AF_REF_REL_OPS_WITH_INT + +#define AF_REF_REL_OP_WITH_AP_INT(REL_OP) \ + template \ + INLINE bool operator REL_OP( \ + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + const ap_int_base<_AP_W2, _AP_S>& op2) { \ + return ap_int_base<_AP_W, false>(op) REL_OP op2; \ + } \ + template \ + INLINE bool operator REL_OP( \ + const ap_int_base<_AP_W2, _AP_S2>& op2, \ + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return op2 REL_OP ap_int_base<_AP_W, false>(op); \ + } \ + template \ + INLINE bool operator REL_OP( \ + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + return ap_int_base<1, false>(op) REL_OP op2; \ + } \ + template \ + INLINE bool operator REL_OP( \ + const ap_int_base<_AP_W2, _AP_S2>& op2, \ + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ + return op2 REL_OP ap_int_base<1, false>(op); \ + } + +AF_REF_REL_OP_WITH_AP_INT(>) +AF_REF_REL_OP_WITH_AP_INT(<) +AF_REF_REL_OP_WITH_AP_INT(>=) +AF_REF_REL_OP_WITH_AP_INT(<=) +AF_REF_REL_OP_WITH_AP_INT(==) +AF_REF_REL_OP_WITH_AP_INT(!=) + +#endif // ifndef __cplusplus + +#endif // ifndef __AP_FIXED_REF_H__ + +// -*- cpp -*- diff --git 
a/TrigScint/include/TrigScint/ap_fixed_special.h b/TrigScint/include/TrigScint/ap_fixed_special.h new file mode 100644 index 000000000..0f7a9f7eb --- /dev/null +++ b/TrigScint/include/TrigScint/ap_fixed_special.h @@ -0,0 +1,230 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_FIXED_SPECIAL_H__ +#define __AP_FIXED_SPECIAL_H__ + +#ifndef __AP_FIXED_H__ +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." +#endif + +#ifndef __SYNTHESIS__ +#include +#include +#endif +// FIXME AP_AUTOCC cannot handle many standard headers, so declare instead of +// include. +// #include +namespace std { +template class complex; +} + +/* + TODO: Modernize the code using C++11/C++14 + 1. constexpr http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0415r0.html + 2. move constructor +*/ + +namespace std { +/* + Specialize std::complex to zero initialization ap_fixed. + + To reduce the area cost, ap_fixed is not zero initialized, just like basic + types float or double. However, libstdc++ provides specialization for float, + double and long double, initializing image part to 0 when not specified. + + This has become a difficulty in switching legacy code from these C types to + ap_fixed. To ease the tranform of legacy code, we have to implement + specialization of std::complex<> for our type. 
+ + As ap_fixed is a template, it is impossible to specialize only the methods + that causes default initialization of value type in std::complex<>. An + explicit full specialization of the template class has to be done, covering + all the member functions and operators of std::complex<> as specified + in standard 26.2.4 and 26.2.5. +*/ +template +class complex > { + public: + typedef ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> _Tp; + typedef _Tp value_type; + + // 26.2.4/1 + // Constructor without argument + // Default initialize, so that in dataflow, the variable is only written once. + complex() : _M_real(_Tp()), _M_imag(_Tp()) {} + // Constructor with ap_fixed. + // Zero initialize image part when not specified, so that `C(1) == C(1,0)` + complex(const _Tp &__r, const _Tp &__i = _Tp(0)) + : _M_real(__r), _M_imag(__i) {} + + // Constructor with another complex number + template + complex(const complex<_Up> &__z) : _M_real(__z.real()), _M_imag(__z.imag()) {} + +#if __cplusplus >= 201103L + const _Tp& real() const { return _M_real; } + const _Tp& imag() const { return _M_imag; } +#else + _Tp& real() { return _M_real; } + const _Tp& real() const { return _M_real; } + _Tp& imag() { return _M_imag; } + const _Tp& imag() const { return _M_imag; } +#endif + + void real(_Tp __val) { _M_real = __val; } + + void imag(_Tp __val) { _M_imag = __val; } + + // Assign this complex number with ap_fixed. + // Zero initialize image poarrt, so that `C c; c = 1; c == C(1,0);` + complex<_Tp> &operator=(const _Tp __t) { + _M_real = __t; + _M_imag = _Tp(0); + return *this; + } + + // 26.2.5/1 + // Add ap_fixed to this complex number. + complex<_Tp> &operator+=(const _Tp &__t) { + _M_real += __t; + return *this; + } + + // 26.2.5/3 + // Subtract ap_fixed from this complex number. + complex<_Tp> &operator-=(const _Tp &__t) { + _M_real -= __t; + return *this; + } + + // 26.2.5/5 + // Multiply this complex number by ap_fixed. 
+ complex<_Tp> &operator*=(const _Tp &__t) { + _M_real *= __t; + _M_imag *= __t; + return *this; + } + + // 26.2.5/7 + // Divide this complex number by ap_fixed. + complex<_Tp> &operator/=(const _Tp &__t) { + _M_real /= __t; + _M_imag /= __t; + return *this; + } + + // Assign complex number to this complex number. + template + complex<_Tp> &operator=(const complex<_Up> &__z) { + _M_real = __z.real(); + _M_imag = __z.imag(); + return *this; + } + + // 26.2.5/9 + // Add complex number to this. + template + complex<_Tp> &operator+=(const complex<_Up> &__z) { + _M_real += __z.real(); + _M_imag += __z.imag(); + return *this; + } + + // 26.2.5/11 + // Subtract complex number from this. + template + complex<_Tp> &operator-=(const complex<_Up> &__z) { + _M_real -= __z.real(); + _M_imag -= __z.imag(); + return *this; + } + + // 26.2.5/13 + // Multiply this by complex number. + template + complex<_Tp> &operator*=(const complex<_Up> &__z) { + const _Tp __r = _M_real * __z.real() - _M_imag * __z.imag(); + _M_imag = _M_real * __z.imag() + _M_imag * __z.real(); + _M_real = __r; + return *this; + } + + // 26.2.5/15 + // Divide this by complex number. + template + complex<_Tp> &operator/=(const complex<_Up> &__z) { + complex<_Tp> cj (__z.real(), -__z.imag()); + complex<_Tp> a = (*this) * cj; + complex<_Tp> b = cj * __z; + _M_real = a.real() / b.real(); + _M_imag = a.imag() / b.real(); + return *this; + } + + private: + _Tp _M_real; + _Tp _M_imag; + +}; // class complex > + +/* + Non-member operations + These operations are not required by standard in 26.2.6, but libstdc++ + defines them for + float, double or long double's specialization. +*/ +// Compare complex number with ap_fixed. +template +inline bool operator==( + const complex > &__x, + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__y) { + return __x.real() == __y && + __x.imag() == 0; +} + +// Compare ap_fixed with complex number. 
+template +inline bool operator==( + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__x, + const complex > &__y) { + return __x == __y.real() && + 0 == __y.imag(); +} + +// Compare complex number with ap_fixed. +template +inline bool operator!=( + const complex > &__x, + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__y) { + return __x.real() != __y || + __x.imag() != 0; +} + +// Compare ap_fixed with complex number. +template +inline bool operator!=( + const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__x, + const complex > &__y) { + return __x != __y.real() || + 0 != __y.imag(); +} + +} // namespace std + +#endif // ifndef __AP_FIXED_SPECIAL_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int.h b/TrigScint/include/TrigScint/ap_int.h new file mode 100644 index 000000000..fbdc9c413 --- /dev/null +++ b/TrigScint/include/TrigScint/ap_int.h @@ -0,0 +1,330 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_INT_H__ +#define __AP_INT_H__ + +#include +#include +#include + +//--------------------------------------------------------------- + +/// Sign Arbitrary Precision Type. 
+template +struct ap_int : ap_int_base<_AP_W, true> { + typedef ap_int_base<_AP_W, true> Base; + // Constructor + INLINE ap_int() : Base() {} + + // Copy ctor + INLINE ap_int(const ap_int& op) { Base::V = op.V; } + + template + INLINE ap_int(const ap_int<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_int(const volatile ap_int<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_int(const ap_uint<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_int(const volatile ap_uint<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_int(const ap_range_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} + + template + INLINE ap_int(const ap_bit_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} + + template + INLINE ap_int(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) + : Base(ref) {} + + template + INLINE ap_int(const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} + + template + INLINE ap_int(const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { + } + + template + INLINE ap_int( + const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} + + template + INLINE ap_int( + const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { + } + + template + INLINE ap_int(const ap_int_base<_AP_W2, _AP_S2>& op) { + Base::V = op.V; + } + + template + INLINE ap_int( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + + template + INLINE ap_int( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + + template + INLINE ap_int( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + +#define CTOR(TYPE) \ + 
INLINE ap_int(TYPE val) { Base::V = val; } + CTOR(bool) + CTOR(char) + CTOR(signed char) + CTOR(unsigned char) + CTOR(short) + CTOR(unsigned short) + CTOR(int) + CTOR(unsigned int) + CTOR(long) + CTOR(unsigned long) + CTOR(ap_slong) + CTOR(ap_ulong) +#undef CTOR + ap_int(double val) : Base(val) {} + ap_int(float val) : Base(val) {} +#if _AP_ENABLE_HALF_ == 1 + ap_int(half val) : Base(val) {} +#endif + + // ap_int_base will guess radix if radix is not provided. + INLINE ap_int(const char* s) : Base(s) {} + + INLINE ap_int(const char* s, signed char rd) : Base(s, rd) {} + + // Assignment + /* ctor will be used when right is not of proper type. */ + + INLINE ap_int& operator=(const ap_int<_AP_W>& op2) { + Base::V = op2.V; + return *this; + } + + /* cannot bind volatile reference to non-volatile type. */ + INLINE ap_int& operator=(const volatile ap_int<_AP_W>& op2) { + Base::V = op2.V; + return *this; + } + + /* cannot return volatile *this. */ + INLINE void operator=(const ap_int<_AP_W>& op2) volatile { Base::V = op2.V; } + + INLINE void operator=(const volatile ap_int<_AP_W>& op2) volatile { + Base::V = op2.V; + } + +}; // struct ap_int. + +//--------------------------------------------------------------- + +/// Unsigned Arbitrary Precision Type. 
+template +struct ap_uint : ap_int_base<_AP_W, false> { + typedef ap_int_base<_AP_W, false> Base; + // Constructor + INLINE ap_uint() : Base() {} + + // Copy ctor + INLINE ap_uint(const ap_uint& op) { Base::V = op.V; } + + template + INLINE ap_uint(const ap_uint<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_uint(const ap_int<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_uint(const volatile ap_uint<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_uint(const volatile ap_int<_AP_W2>& op) { + Base::V = op.V; + } + + template + INLINE ap_uint(const ap_range_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} + + template + INLINE ap_uint(const ap_bit_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} + + template + INLINE ap_uint(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) + : Base(ref) {} + + template + INLINE ap_uint(const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} + + template + INLINE ap_uint(const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { + } + + template + INLINE ap_uint( + const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} + + template + INLINE ap_uint( + const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { + } + + template + INLINE ap_uint(const ap_int_base<_AP_W2, _AP_S2>& op) { + Base::V = op.V; + } + + template + INLINE ap_uint( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + + template + INLINE ap_uint( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + + template + INLINE ap_uint( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) + : Base(op) {} + 
+#define CTOR(TYPE) \ + INLINE ap_uint(TYPE val) { Base::V = val; } + CTOR(bool) + CTOR(char) + CTOR(signed char) + CTOR(unsigned char) + CTOR(short) + CTOR(unsigned short) + CTOR(int) + CTOR(unsigned int) + CTOR(long) + CTOR(unsigned long) + CTOR(ap_slong) + CTOR(ap_ulong) +#undef CTOR + ap_uint(double val) : Base(val) {} + ap_uint(float val) : Base(val) {} +#if _AP_ENABLE_HALF_ == 1 + ap_uint(half val) : Base(val) {} +#endif + + // ap_int_base will guess radix if radix is not provided. + INLINE ap_uint(const char* s) : Base(s) {} + + INLINE ap_uint(const char* s, signed char rd) : Base(s, rd) {} + + // Assignment + /* XXX ctor will be used when right is not of proper type. */ + + INLINE ap_uint& operator=(const ap_uint<_AP_W>& op2) { + Base::V = op2.V; + return *this; + } + + /* cannot bind volatile reference to non-volatile type. */ + INLINE ap_uint& operator=(const volatile ap_uint<_AP_W>& op2) { + Base::V = op2.V; + return *this; + } + + /* cannot return volatile *this. */ + INLINE void operator=(const ap_uint<_AP_W>& op2) volatile { Base::V = op2.V; } + + INLINE void operator=(const volatile ap_uint<_AP_W>& op2) volatile { + Base::V = op2.V; + } + +}; // struct ap_uint. + +#define ap_bigint ap_int +#define ap_biguint ap_uint + +#if !defined(__SYNTHESIS__) && (defined(SYSTEMC_H) || defined(SYSTEMC_INCLUDED)) +// XXX sc_trace overload for ap_fixed is already included in +// "ap_sysc/ap_sc_extras.h", so do not define in synthesis. +template +INLINE void sc_trace(sc_core::sc_trace_file* tf, const ap_int<_AP_W>& op, + const std::string& name) { + if (tf) tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); +} + +template +INLINE void sc_trace(sc_core::sc_trace_file* tf, const ap_uint<_AP_W>& op, + const std::string& name) { + if (tf) tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); +} +#endif // System C sim + +#include + +#endif // ifndef __AP_INT_H__ else + +// FIXME user should include ap_fixed.h when using ap_fixed. 
+// to avoid circular inclusion, must check whether this is required by +// ap_fixed.h +#ifndef __AP_FIXED_H__ +#include +#endif + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int_base.h b/TrigScint/include/TrigScint/ap_int_base.h new file mode 100644 index 000000000..795d20717 --- /dev/null +++ b/TrigScint/include/TrigScint/ap_int_base.h @@ -0,0 +1,1885 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_INT_BASE_H__ +#define __AP_INT_BASE_H__ + +#ifndef __AP_INT_H__ +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." +#endif + +#ifndef __cplusplus +#error "C++ is required to include this header file" +#else + +#include +#ifndef __SYNTHESIS__ +#if _AP_ENABLE_HALF_ == 1 +#include +#endif +#include +#include +#endif + +/* ---------------------------------------------------------------- + * ap_int_base: AutoPilot integer/Arbitrary precision integer. + * ---------------------------------------------------------------- + */ + +/* helper trait. Selecting the smallest C type that can hold the value, + * return 64 bit C type if not possible. 
+ */ +template +struct retval; + +// at least 64 bit +template +struct retval<_AP_N, true> { + typedef ap_slong Type; +}; + +template +struct retval<_AP_N, false> { + typedef ap_ulong Type; +}; + +// at least 8 bit +template <> +struct retval<1, true> { + typedef signed char Type; +}; + +template <> +struct retval<1, false> { + typedef unsigned char Type; +}; + +// at least 16 bit +template <> +struct retval<2, true> { + typedef short Type; +}; + +template <> +struct retval<2, false> { + typedef unsigned short Type; +}; + +// at least 32 bit +template <> +struct retval<3, true> { + typedef long Type; +}; + +template <> +struct retval<3, false> { + typedef unsigned long Type; +}; + +template <> +struct retval<4, true> { + typedef long Type; +}; + +template <> +struct retval<4, false> { + typedef unsigned long Type; +}; + +// trait for letting base class to return derived class. +// Notice that derived class template is incomplete, and we cannot use +// the member of the derived class. +template +struct _ap_int_factory; +template +struct _ap_int_factory<_AP_W2,true> { typedef ap_int<_AP_W2> type; }; +template +struct _ap_int_factory<_AP_W2,false> { typedef ap_uint<_AP_W2> type; }; + +template +struct ap_int_base : public _AP_ROOT_TYPE<_AP_W, _AP_S> { + public: + typedef _AP_ROOT_TYPE<_AP_W, _AP_S> Base; + + /* ap_int_base<_AP_W, _AP_S, true> + * typedef typename retval<(_AP_W + 7) / 8, _AP_S>::Type RetType; + * + * ap_int_base<_AP_W, _AP_S, false> + * typedef typename retval<8, _AP_S>::Type RetType; + */ + typedef typename retval::Type RetType; + + static const int width = _AP_W; + + template + struct RType { + enum { + mult_w = _AP_W + _AP_W2, + mult_s = _AP_S || _AP_S2, + plus_w = + AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, + plus_s = _AP_S || _AP_S2, + minus_w = + AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, + minus_s = true, + div_w = _AP_W + _AP_S2, + div_s = _AP_S || _AP_S2, + mod_w = AP_MIN(_AP_W, _AP_W2 + 
(!_AP_S2 && _AP_S)), + mod_s = _AP_S, + logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), + logic_s = _AP_S || _AP_S2 + }; + + + typedef ap_int_base mult_base; + typedef ap_int_base plus_base; + typedef ap_int_base minus_base; + typedef ap_int_base logic_base; + typedef ap_int_base div_base; + typedef ap_int_base mod_base; + typedef ap_int_base<_AP_W, _AP_S> arg1_base; + + typedef typename _ap_int_factory::type mult; + typedef typename _ap_int_factory::type plus; + typedef typename _ap_int_factory::type minus; + typedef typename _ap_int_factory::type logic; + typedef typename _ap_int_factory::type div; + typedef typename _ap_int_factory::type mod; + typedef typename _ap_int_factory<_AP_W, _AP_S>::type arg1; + typedef bool reduce; + }; + + /* Constructors. + * ---------------------------------------------------------------- + */ + /// default ctor + INLINE ap_int_base() { + /* + #ifdef __SC_COMPATIBLE__ + Base::V = 0; + #endif + */ + } + + /// copy ctor + template + INLINE ap_int_base(const ap_int_base<_AP_W2, _AP_S2>& op) { + Base::V = op.V; + } + + /// volatile copy ctor + template + INLINE ap_int_base(const volatile ap_int_base<_AP_W2, _AP_S2>& op) { + Base::V = op.V; + } + +// XXX C++11 feature. +// The explicit specifier specifies that a constructor or conversion function +// (since C++11) doesn't allow implicit conversions or copy-initialization. +// ap_int_base x = 1; +// ap_int_base foo() { return 1; } +// but allows +// ap_int_base x(1); +// ap_int_base y {1}; + +/// from all c types. 
+#define CTOR_FROM_INT(Type, Size, Signed) \ + INLINE ap_int_base(const Type op) { Base::V = op; } + + CTOR_FROM_INT(bool, 1, false) + CTOR_FROM_INT(char, 8, CHAR_IS_SIGNED) + CTOR_FROM_INT(signed char, 8, true) + CTOR_FROM_INT(unsigned char, 8, false) + CTOR_FROM_INT(short, _AP_SIZE_short, true) + CTOR_FROM_INT(unsigned short, _AP_SIZE_short, false) + CTOR_FROM_INT(int, _AP_SIZE_int, true) + CTOR_FROM_INT(unsigned int, _AP_SIZE_int, false) + CTOR_FROM_INT(long, _AP_SIZE_long, true) + CTOR_FROM_INT(unsigned long, _AP_SIZE_long, false) + CTOR_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) + CTOR_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) +#undef CTOR_FROM_INT + +#if _AP_ENABLE_HALF_ == 1 + /// ctor from half. + // TODO optimize + INLINE ap_int_base(half op) { + ap_int_base<_AP_W, _AP_S> t((float)op); + Base::V = t.V; + } +#endif + + /// ctor from float. + INLINE ap_int_base(float op) { + const int BITS = FLOAT_MAN + FLOAT_EXP + 1; + ap_int_base reg; + reg.V = floatToRawBits(op); + bool is_neg = _AP_ROOT_op_get_bit(reg.V, BITS - 1); + + ap_int_base exp = 0; + exp.V = _AP_ROOT_op_get_range(reg.V, FLOAT_MAN, BITS - 2); + exp = exp - FLOAT_BIAS; + + ap_int_base man; + man.V = _AP_ROOT_op_get_range(reg.V, 0, FLOAT_MAN - 1); + // check for NaN + _AP_WARNING(exp == ((unsigned char)(FLOAT_BIAS + 1)) && man.V != 0, + "assign NaN to ap integer value"); + // set leading 1. + man.V = _AP_ROOT_op_set_bit(man.V, FLOAT_MAN, 1); + //if (is_neg) man = -man; + + if ((reg.V & 0x7ffffffful) == 0) { + Base::V = 0; + } else { + int sh_amt = FLOAT_MAN - exp.V; + if (sh_amt == 0) { + Base::V = man.V; + } else if (sh_amt > 0) { + if (sh_amt < FLOAT_MAN + 2) { + Base::V = man.V >> sh_amt; + } else { + if (is_neg) + Base::V = -1; + else + Base::V = 0; + } + } else { + sh_amt = -sh_amt; + if (sh_amt < _AP_W) { + Base::V = man.V; + Base::V <<= sh_amt; + } else { + Base::V = 0; + } + } + } + if (is_neg) *this = -(*this); + } + + /// ctor from double. 
+ INLINE ap_int_base(double op) { + const int BITS = DOUBLE_MAN + DOUBLE_EXP + 1; + ap_int_base reg; + reg.V = doubleToRawBits(op); + bool is_neg = _AP_ROOT_op_get_bit(reg.V, BITS - 1); + + ap_int_base exp = 0; + exp.V = _AP_ROOT_op_get_range(reg.V, DOUBLE_MAN, BITS - 2); + exp = exp - DOUBLE_BIAS; + + ap_int_base man; + man.V = _AP_ROOT_op_get_range(reg.V, 0, DOUBLE_MAN - 1); + // check for NaN + _AP_WARNING(exp == ((unsigned char)(DOUBLE_BIAS + 1)) && man.V != 0, + "assign NaN to ap integer value"); + // set leading 1. + man.V = _AP_ROOT_op_set_bit(man.V, DOUBLE_MAN, 1); + //if (is_neg) man = -man; + + if ((reg.V & 0x7fffffffffffffffull) == 0) { + Base::V = 0; + } else { + int sh_amt = DOUBLE_MAN - exp.V; + if (sh_amt == 0) { + Base::V = man.V; + } else if (sh_amt > 0) { + if (sh_amt < DOUBLE_MAN + 2) { + Base::V = man.V >> sh_amt; + } else { + if (is_neg) + Base::V = -1; + else + Base::V = 0; + } + } else { + sh_amt = -sh_amt; + if (sh_amt < _AP_W) { + Base::V = man.V; + Base::V <<= sh_amt; + } else { + Base::V = 0; + } + } + } + if (is_neg) *this = -(*this); + } + + /// from higer rank type. + template + INLINE ap_int_base( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + Base::V = op.to_ap_int_base().V; + } + + template + INLINE ap_int_base(const ap_range_ref<_AP_W2, _AP_S2>& ref) { + Base::V = (ref.get()).V; + } + + template + INLINE ap_int_base(const ap_bit_ref<_AP_W2, _AP_S2>& ref) { + Base::V = ref.operator bool(); + } + + template + INLINE ap_int_base(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { + const ap_int_base::_AP_WR, + false> + tmp = ref.get(); + Base::V = tmp.V; + } + + /* radix has default value in set */ + +#ifndef __SYNTHESIS__ + INLINE ap_int_base(const char* s, signed char rd = 0) { + if (rd == 0) + rd = guess_radix(s); + unsigned int length = strlen(s); + Base::V.fromString(s, length, rd); + } +#else + // XXX __builtin_bit_from_string(...) requires const C string and radix. 
+ INLINE ap_int_base(const char* s) { + typeof(Base::V) t; + _ssdm_string2bits((void*)(&t), (const char*)(s), 10, _AP_W, _AP_S, + AP_TRN, AP_WRAP, 0, _AP_C99); + Base::V = t; + } + INLINE ap_int_base(const char* s, signed char rd) { + typeof(Base::V) t; + _ssdm_string2bits((void*)(&t), (const char*)(s), rd, _AP_W, _AP_S, + AP_TRN, AP_WRAP, 0, _AP_C99); + Base::V = t; + } +#endif + + template + INLINE ap_int_base( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + Base::V = (val.get()).V; + } + + template + INLINE ap_int_base( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + Base::V = val.operator bool(); + } + + INLINE ap_int_base read() volatile { + /*AP_DEBUG(printf("call read %d\n", Base::V););*/ + ap_int_base ret; + ret.V = Base::V; + return ret; + } + + INLINE void write(const ap_int_base<_AP_W, _AP_S>& op2) volatile { + /*AP_DEBUG(printf("call write %d\n", op2.V););*/ + Base::V = op2.V; + } + + /* Another form of "write".*/ + template + INLINE void operator=( + const volatile ap_int_base<_AP_W2, _AP_S2>& op2) volatile { + Base::V = op2.V; + } + + INLINE void operator=( + const volatile ap_int_base<_AP_W, _AP_S>& op2) volatile { + Base::V = op2.V; + } + + template + INLINE void operator=(const ap_int_base<_AP_W2, _AP_S2>& op2) volatile { + Base::V = op2.V; + } + + INLINE void operator=(const ap_int_base<_AP_W, _AP_S>& op2) volatile { + Base::V = op2.V; + } + + template + INLINE ap_int_base& operator=( + const volatile ap_int_base<_AP_W2, _AP_S2>& op2) { + Base::V = op2.V; + return *this; + } + + template + INLINE ap_int_base& operator=(const ap_int_base<_AP_W2, _AP_S2>& op2) { + Base::V = op2.V; + return *this; + } + + INLINE ap_int_base& operator=(const volatile ap_int_base<_AP_W, _AP_S>& op2) { + Base::V = op2.V; + return *this; + } + + INLINE ap_int_base& operator=(const ap_int_base<_AP_W, _AP_S>& op2) { + Base::V = op2.V; + return *this; + } + + +#define ASSIGN_OP_FROM_INT(Type, Size, Signed) \ + 
INLINE ap_int_base& operator=(Type op) { \ + Base::V = op; \ + return *this; \ + } + + ASSIGN_OP_FROM_INT(bool, 1, false) + ASSIGN_OP_FROM_INT(char, 8, CHAR_IS_SIGNED) + ASSIGN_OP_FROM_INT(signed char, 8, true) + ASSIGN_OP_FROM_INT(unsigned char, 8, false) + ASSIGN_OP_FROM_INT(short, _AP_SIZE_short, true) + ASSIGN_OP_FROM_INT(unsigned short, _AP_SIZE_short, false) + ASSIGN_OP_FROM_INT(int, _AP_SIZE_int, true) + ASSIGN_OP_FROM_INT(unsigned int, _AP_SIZE_int, false) + ASSIGN_OP_FROM_INT(long, _AP_SIZE_long, true) + ASSIGN_OP_FROM_INT(unsigned long, _AP_SIZE_long, false) + ASSIGN_OP_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) + ASSIGN_OP_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef ASSIGN_OP_FROM_INT + + template + INLINE ap_int_base& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& op2) { + Base::V = (bool)op2; + return *this; + } + + template + INLINE ap_int_base& operator=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + Base::V = (ap_int_base<_AP_W2, false>(op2)).V; + return *this; + } + + template + INLINE ap_int_base& operator=( + const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op2) { + Base::V = op2.get().V; + return *this; + } + + template + INLINE ap_int_base& operator=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + Base::V = op.to_ap_int_base().V; + return *this; + } + + template + INLINE ap_int_base& operator=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + Base::V = (bool)op; + return *this; + } + + template + INLINE ap_int_base& operator=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { + Base::V = ((const ap_int_base<_AP_W2, false>)(op)).V; + return *this; + } + + // FIXME: UG902 has clearly required user to use to_int() to convert to built-in + // types, but this implicit conversion is relied on in hls_cordic.h and hls_rsr.h. 
+ // For example: + // int d_exp = fps_x.exp - fps_y.exp; + INLINE operator RetType() const { return (RetType)(Base::V); } + + /* Explicit conversions to C types. + * ---------------------------------------------------------------- + */ + INLINE bool to_bool() const { return (bool)(Base::V); } + INLINE char to_char() const { return (char)(Base::V); } + INLINE signed char to_schar() const { return (signed char)(Base::V); } + INLINE unsigned char to_uchar() const { return (unsigned char)(Base::V); } + INLINE short to_short() const { return (short)(Base::V); } + INLINE unsigned short to_ushort() const { return (unsigned short)(Base::V); } + INLINE int to_int() const { return (int)(Base::V); } + INLINE unsigned to_uint() const { return (unsigned)(Base::V); } + INLINE long to_long() const { return (long)(Base::V); } + INLINE unsigned long to_ulong() const { return (unsigned long)(Base::V); } + INLINE ap_slong to_int64() const { return (ap_slong)(Base::V); } + INLINE ap_ulong to_uint64() const { return (ap_ulong)(Base::V); } + INLINE float to_float() const { return (float)(Base::V); } + INLINE double to_double() const { return (double)(Base::V); } + + // TODO decide if user-defined conversion should be provided. 
+#if 0 + INLINE operator char() const { return (char)(Base::V); } + INLINE operator signed char() const { return (signed char)(Base::V); } + INLINE operator unsigned char() const { return (unsigned char)(Base::V); } + INLINE operator short() const { return (short)(Base::V); } + INLINE operator unsigned short() const { return (unsigned short)(Base::V); } + INLINE operator int() const { return (int)(Base::V); } + INLINE operator unsigned int () const { return (unsigned)(Base::V); } + INLINE operator long () const { return (long)(Base::V); } + INLINE operator unsigned long () const { return (unsigned long)(Base::V); } + INLINE operator ap_slong () { return (ap_slong)(Base::V); } + INLINE operator ap_ulong () { return (ap_ulong)(Base::V); } +#endif + + /* Helper methods. + ---------------------------------------------------------------- + */ + /* we cannot call a non-volatile function on a volatile instance. + * but calling a volatile function is ok. + * XXX deleted non-volatile version. + */ + INLINE int length() const volatile { return _AP_W; } + + /*Return true if the value of ap_int_base instance is zero*/ + INLINE bool iszero() const { return Base::V == 0; } + + /*Return true if the value of ap_int_base instance is zero*/ + INLINE bool is_zero() const { return Base::V == 0; } + + /* x < 0 */ + INLINE bool sign() const { + if (_AP_S && + _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)) + return true; + else + return false; + } + + /* x[i] = 0 */ + INLINE void clear(int i) { + AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); + Base::V = _AP_ROOT_op_set_bit(Base::V, i, 0); + } + + /* x[i] = !x[i]*/ + INLINE void invert(int i) { + AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); + bool val = _AP_ROOT_op_get_bit(Base::V, i); + if (val) + Base::V = _AP_ROOT_op_set_bit(Base::V, i, 0); + else + Base::V = _AP_ROOT_op_set_bit(Base::V, i, 1); + } + + INLINE bool test(int i) const { + AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); + return 
_AP_ROOT_op_get_bit(Base::V, i); + } + + // Get self. For ap_concat_ref expansion. + INLINE ap_int_base& get() { return *this; } + + // Set the ith bit into 1 + INLINE void set(int i) { + AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); + Base::V = _AP_ROOT_op_set_bit(Base::V, i, 1); + } + + // Set the ith bit into v + INLINE void set(int i, bool v) { + AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); + Base::V = _AP_ROOT_op_set_bit(Base::V, i, v); + } + + // This is used for sc_lv and sc_bv, which is implemented by sc_uint + // Rotate an ap_int_base object n places to the left + INLINE ap_int_base& lrotate(int n) { + AP_ASSERT(n >= 0 && n < _AP_W, "shift value out of range"); + // TODO unify this. +#ifdef __SYNTHESIS__ + typeof(Base::V) l_p = Base::V << n; + typeof(Base::V) r_p = Base::V >> (_AP_W - n); + Base::V = l_p | r_p; +#else + Base::V.lrotate(n); +#endif + return *this; + } + + // This is used for sc_lv and sc_bv, which is implemented by sc_uint + // Rotate an ap_int_base object n places to the right + INLINE ap_int_base& rrotate(int n) { + AP_ASSERT(n >= 0 && n < _AP_W, "shift value out of range"); + // TODO unify this. +#ifdef __SYNTHESIS__ + typeof(Base::V) l_p = Base::V << (_AP_W - n); + typeof(Base::V) r_p = Base::V >> n; + Base::V = l_p | r_p; +#else + Base::V.rrotate(n); +#endif + return *this; + } + + // Reverse the contents of ap_int_base instance. + // I.e. LSB becomes MSB and vise versa. 
+ INLINE ap_int_base& reverse() { + Base::V = _AP_ROOT_op_get_range(Base::V, _AP_W - 1, 0); + return *this; + } + + // Set the ith bit into v + INLINE void set_bit(int i, bool v) { + Base::V = _AP_ROOT_op_set_bit(Base::V, i, v); + } + + // Get the value of ith bit + INLINE bool get_bit(int i) const { + return (bool)_AP_ROOT_op_get_bit(Base::V, i); + } + + // complements every bit + INLINE void b_not() { Base::V = ~Base::V; } + +#define OP_ASSIGN_AP(Sym) \ + template \ + INLINE ap_int_base& operator Sym(const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + Base::V Sym op2.V; \ + return *this; \ + } + + /* Arithmetic assign. + * ---------------------------------------------------------------- + */ + OP_ASSIGN_AP(*=) + OP_ASSIGN_AP(+=) + OP_ASSIGN_AP(-=) + OP_ASSIGN_AP(/=) + OP_ASSIGN_AP(%=) +#undef OP_ASSIGN_AP + + /* Bitwise assign: and, or, xor. + * ---------------------------------------------------------------- + */ +#define OP_ASSIGN_AP_CHK(Sym) \ + template \ + INLINE ap_int_base& operator Sym(const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + _AP_WARNING((_AP_W != _AP_W2), \ + "Bitsize mismatch for ap_[u]int" #Sym "ap_[u]int."); \ + Base::V Sym op2.V; \ + return *this; \ + } + OP_ASSIGN_AP_CHK(&=) + OP_ASSIGN_AP_CHK(|=) + OP_ASSIGN_AP_CHK(^=) +#undef OP_ASSIGN_AP_CHK + + /* Prefix increment, decrement. 
+ * ---------------------------------------------------------------- + */ + INLINE ap_int_base& operator++() { + operator+=((ap_int_base<1, false>)1); + return *this; + } + INLINE ap_int_base& operator--() { + operator-=((ap_int_base<1, false>)1); + return *this; + } + + /* Postfix increment, decrement + * ---------------------------------------------------------------- + */ + INLINE const typename RType<_AP_W,_AP_S>::arg1 operator++(int) { + ap_int_base t = *this; + operator+=((ap_int_base<1, false>)1); + return t; + } + INLINE const typename RType<_AP_W,_AP_S>::arg1 operator--(int) { + ap_int_base t = *this; + operator-=((ap_int_base<1, false>)1); + return t; + } + + /* Unary arithmetic. + * ---------------------------------------------------------------- + */ + INLINE typename RType<_AP_W,_AP_S>::arg1 operator+() const { return *this; } + + // TODO used to be W>64 only... need check. + INLINE typename RType<1, false>::minus operator-() const { + return ap_int_base<1, false>(0) - *this; + } + + /* Not (!) + * ---------------------------------------------------------------- + */ + INLINE bool operator!() const { return Base::V == 0; } + + /* Bitwise (arithmetic) unary: complement + ---------------------------------------------------------------- + */ + // XXX different from Mentor's ac_int! + INLINE typename RType<_AP_W,_AP_S>::arg1 operator~() const { + ap_int_base<_AP_W, _AP_S> r; + r.V = ~Base::V; + return r; + } + + /* Shift (result constrained by left operand). 
+ * ---------------------------------------------------------------- + */ + template + INLINE typename RType<_AP_W,_AP_S>::arg1 operator<<(const ap_int_base<_AP_W2, true>& op2) const { + bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); + ap_int_base<_AP_W2, false> sh = op2; + if (isNeg) { + sh = -op2; + return operator>>(sh); + } else + return operator<<(sh); + } + + template + INLINE typename RType<_AP_W,_AP_S>::arg1 operator<<(const ap_int_base<_AP_W2, false>& op2) const { + ap_int_base r; + r.V = Base::V << op2.to_uint(); + return r; + } + + template + INLINE typename RType<_AP_W,_AP_S>::arg1 operator>>(const ap_int_base<_AP_W2, true>& op2) const { + bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); + ap_int_base<_AP_W2, false> sh = op2; + if (isNeg) { + sh = -op2; + return operator<<(sh); + } + return operator>>(sh); + } + + template + INLINE typename RType<_AP_W,_AP_S>::arg1 operator>>(const ap_int_base<_AP_W2, false>& op2) const { + ap_int_base r; + r.V = Base::V >> op2.to_uint(); + return r; + } + + // FIXME we standalone operator>> for ap_int_base and ap_range_ref. 
+#if 0 + template + INLINE ap_int_base operator<<(const ap_range_ref<_AP_W2, _AP_S2>& op2) const { + return *this << (op2.operator ap_int_base<_AP_W2, false>()); + } + + template + INLINE ap_int_base operator>>(const ap_range_ref<_AP_W2, _AP_S2>& op2) const { + return *this >> (op2.operator ap_int_base<_AP_W2, false>()); + } +#endif + + /* Shift assign + * ---------------------------------------------------------------- + */ + template + INLINE ap_int_base& operator<<=(const ap_int_base<_AP_W2, true>& op2) { + bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); + ap_int_base<_AP_W2, false> sh = op2; + if (isNeg) { + sh = -op2; + return operator>>=(sh); + } else + return operator<<=(sh); + } + + template + INLINE ap_int_base& operator<<=(const ap_int_base<_AP_W2, false>& op2) { + Base::V <<= op2.to_uint(); + return *this; + } + + template + INLINE ap_int_base& operator>>=(const ap_int_base<_AP_W2, true>& op2) { + bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); + ap_int_base<_AP_W2, false> sh = op2; + if (isNeg) { + sh = -op2; + return operator<<=(sh); + } + return operator>>=(sh); + } + + template + INLINE ap_int_base& operator>>=(const ap_int_base<_AP_W2, false>& op2) { + Base::V >>= op2.to_uint(); + return *this; + } + + // FIXME we standalone operator>> for ap_int_base and ap_range_ref. +#if 0 + template + INLINE ap_int_base& operator<<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return *this <<= (op2.operator ap_int_base<_AP_W2, false>()); + } + template + INLINE ap_int_base& operator>>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return *this >>= (op2.operator ap_int_base<_AP_W2, false>()); + } +#endif + + /* Equality and Relational. 
+ * ---------------------------------------------------------------- + */ + template + INLINE bool operator==(const ap_int_base<_AP_W2, _AP_S2>& op2) const { + return Base::V == op2.V; + } + template + INLINE bool operator!=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { + return !(Base::V == op2.V); + } + template + INLINE bool operator<(const ap_int_base<_AP_W2, _AP_S2>& op2) const { + return Base::V < op2.V; + } + template + INLINE bool operator>=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { + return Base::V >= op2.V; + } + template + INLINE bool operator>(const ap_int_base<_AP_W2, _AP_S2>& op2) const { + return Base::V > op2.V; + } + template + INLINE bool operator<=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { + return Base::V <= op2.V; + } + + /* Bit and Part Select + * ---------------------------------------------------------------- + */ + INLINE ap_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { + _AP_ERROR(Hi >= _AP_W, "Hi(%d)out of bound(%d) in range()", Hi, _AP_W); + _AP_ERROR(Lo >= _AP_W, "Lo(%d)out of bound(%d) in range()", Lo, _AP_W); + return ap_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + // This is a must to strip constness to produce reference type. 
+ INLINE ap_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { + _AP_ERROR(Hi >= _AP_W, "Hi(%d)out of bound(%d) in range()", Hi, _AP_W); + _AP_ERROR(Lo >= _AP_W, "Lo(%d)out of bound(%d) in range()", Lo, _AP_W); + return ap_range_ref<_AP_W, _AP_S>(const_cast(this), Hi, Lo); + } + + template + INLINE ap_range_ref<_AP_W, _AP_S> range( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + template + INLINE ap_range_ref<_AP_W, _AP_S> range( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + INLINE ap_range_ref<_AP_W, _AP_S> range() { + return this->range(_AP_W - 1, 0); + } + + INLINE ap_range_ref<_AP_W, _AP_S> range() const { + return this->range(_AP_W - 1, 0); + } + + INLINE ap_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { + return this->range(Hi, Lo); + } + + INLINE ap_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { + return this->range(Hi, Lo); + } + + template + INLINE ap_range_ref<_AP_W, _AP_S> operator()( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + template + INLINE ap_range_ref<_AP_W, _AP_S> operator()( + const ap_int_base<_AP_W2, _AP_S2>& HiIdx, + const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + +#if 0 + template + INLINE ap_int_base slice() const { + AP_ASSERT(Hi >= Lo && Hi < _AP_W && Lo < _AP_W, "Out of bounds in slice()"); + ap_int_base tmp ; + tmp.V = _AP_ROOT_op_get_range(Base::V, Lo, Hi); + return tmp; + } + + INLINE ap_bit_ref<_AP_W,_AP_S> operator [] ( unsigned int uindex) { + AP_ASSERT(uindex < _AP_W, "Attempting to read bit beyond MSB"); + 
ap_bit_ref<_AP_W,_AP_S> bvh( this, uindex ); + return bvh; + } +#endif + + INLINE ap_bit_ref<_AP_W, _AP_S> operator[](int index) { + AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> bvh(this, index); + return bvh; + } + + template + INLINE ap_bit_ref<_AP_W, _AP_S> operator[]( + const ap_int_base<_AP_W2, _AP_S2>& index) { + AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> bvh(this, index.to_int()); + return bvh; + } + + INLINE bool operator[](int index) const { + AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> br(this, index); + return br.to_bool(); + } + template + INLINE bool operator[](const ap_int_base<_AP_W2, _AP_S2>& index) const { + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> br(this, index.to_int()); + return br.to_bool(); + } + + INLINE ap_bit_ref<_AP_W, _AP_S> bit(int index) { + AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> bvh(this, index); + return bvh; + } + template + INLINE ap_bit_ref<_AP_W, _AP_S> bit( + const ap_int_base<_AP_W2, _AP_S2>& index) { + AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> bvh(this, index.to_int()); + return bvh; + } + + INLINE bool bit(int index) const { + AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W, _AP_S> br(this, index); + return br.to_bool(); + } + + template + INLINE bool bit(const ap_int_base<_AP_W2, _AP_S2>& 
index) const { + return bit(index.to_int()); + } + +#if 0 + template + INLINE bool operator[](_AP_T index) const { + AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); + ap_bit_ref<_AP_W,_AP_S> br = operator[](index); + return br.to_bool(); + } +#endif + + // Count the number of zeros from the most significant bit + // to the first one bit. + INLINE int countLeadingZeros() { +#ifdef __SYNTHESIS__ + if (_AP_W <= 32) { + ap_int_base<32, false> t(-1UL), x; + x.V = _AP_ROOT_op_get_range(this->V, _AP_W - 1, 0); // reverse + t.V = _AP_ROOT_op_set_range(t.V, 0, _AP_W - 1, x.V); + return __builtin_ctz(t.V); // count trailing zeros. + } else if (_AP_W <= 64) { + ap_int_base<64, false> t(-1ULL); + ap_int_base<64, false> x; + x.V = _AP_ROOT_op_get_range(this->V, _AP_W - 1, 0); // reverse + t.V = _AP_ROOT_op_set_range(t.V, 0, _AP_W - 1, x.V); + return __builtin_ctzll(t.V); // count trailing zeros. + } else { + enum { __N = (_AP_W + 63) / 64 }; + int NZeros = 0; + int i = 0; + bool hitNonZero = false; + for (i = 0; i < __N - 1; ++i) { + ap_int_base<64, false> t; + t.V = _AP_ROOT_op_get_range(this->V, _AP_W - i * 64 - 64, _AP_W - i * 64 - 1); + NZeros += hitNonZero ? 0 : __builtin_clzll(t.V); // count leading zeros. 
+ hitNonZero |= (t.V != 0); + } + if (!hitNonZero) { + ap_int_base<64, false> t(-1ULL); + enum { REST = (_AP_W - 1) % 64 }; + ap_int_base<64, false> x; + x.V = _AP_ROOT_op_get_range(this->V, 0, REST); + t.V = _AP_ROOT_op_set_range(t.V, 63 - REST, 63, x.V); + NZeros += __builtin_clzll(t.V); + } + return NZeros; + } +#else + return (Base::V).countLeadingZeros(); +#endif + } // countLeadingZeros + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + concat(const ap_int_base<_AP_W2, _AP_S2>& a2) const { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + const_cast&>(*this), + const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + concat(ap_int_base<_AP_W2, _AP_S2>& a2) { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >(*this, a2); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > + operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) const { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_range_ref<_AP_W2, _AP_S2> >( + const_cast&>(*this), + const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > + operator,(ap_range_ref<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_range_ref<_AP_W2, _AP_S2> >(*this, a2); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(ap_int_base<_AP_W2, _AP_S2> &a2) const { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + const_cast&>(*this), a2); + } + + template + INLINE 
ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) const { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + const_cast&>(*this), + const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >(*this, a2); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> > + operator,(const ap_bit_ref<_AP_W2, _AP_S2> &a2) const { + return ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> >( + const_cast&>(*this), + const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> > + operator,(ap_bit_ref<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> >( + *this, a2); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > + operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( + const_cast&>(*this), + const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > + operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { + return ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, + a2); + } + + template + INLINE ap_concat_ref< + _AP_W, ap_int_base, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> + &a2) const { + return ap_concat_ref< + _AP_W, ap_int_base, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, 
_AP_O2, _AP_N2> >( + const_cast&>(*this), + const_cast< + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); + } + + template + INLINE ap_concat_ref< + _AP_W, ap_int_base, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { + return ap_concat_ref< + _AP_W, ap_int_base, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, + a2); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_int_base, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> + &a2) const { + return ap_concat_ref< + _AP_W, ap_int_base, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( + const_cast&>(*this), + const_cast&>( + a2)); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_int_base, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,( + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { + return ap_concat_ref< + _AP_W, ap_int_base, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); + } + + template + INLINE ap_int_base operator&( + const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { + return *this & a2.get(); + } + + template + INLINE ap_int_base operator|( + const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { + return *this | a2.get(); + } + + template + INLINE ap_int_base operator^( + const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { + return *this ^ a2.get(); + } + + template + INLINE void set(const ap_int_base<_AP_W3, false>& val) { + Base::V = val.V; + } + + /* Reduce operations. + * ---------------------------------------------------------------- + */ + // XXX non-const version deleted. 
+ INLINE bool and_reduce() const { return _AP_ROOT_op_reduce(and, Base::V); } + INLINE bool nand_reduce() const { return _AP_ROOT_op_reduce(nand, Base::V); } + INLINE bool or_reduce() const { return _AP_ROOT_op_reduce(or, Base::V); } + INLINE bool nor_reduce() const { return !(_AP_ROOT_op_reduce(or, Base::V)); } + INLINE bool xor_reduce() const { return _AP_ROOT_op_reduce (xor, Base::V); } + INLINE bool xnor_reduce() const { + return !(_AP_ROOT_op_reduce (xor, Base::V)); + } + + /* Output as a string. + * ---------------------------------------------------------------- + */ +#ifndef __SYNTHESIS__ + std::string to_string(signed char rd = 2, bool sign = _AP_S) const { + // XXX in autosim/autowrap.tcl "(${name}).to_string(2).c_str()" is used to + // initialize sc_lv, which seems incapable of handling format "-0b". + if (rd == 2) sign = false; + return (Base::V).to_string(rd, sign); + } +#else + INLINE char* to_string(signed char rd = 2, bool sign = _AP_S) const { + return 0; + } +#endif +}; // struct ap_int_base + +// XXX apcc cannot handle global std::ios_base::Init() brought in by +#ifndef AP_AUTOCC +#ifndef __SYNTHESIS__ +template +INLINE std::ostream& operator<<(std::ostream& os, + const ap_int_base<_AP_W, _AP_S>& x) { + std::ios_base::fmtflags ff = std::cout.flags(); + if (ff & std::cout.hex) { + os << x.to_string(16); // don't print sign + } else if (ff & std::cout.oct) { + os << x.to_string(8); // don't print sign + } else { + os << x.to_string(10); + } + return os; +} +#endif // ifndef __SYNTHESIS__ + +#ifndef __SYNTHESIS__ +template +INLINE std::istream& operator>>(std::istream& in, + ap_int_base<_AP_W, _AP_S>& op) { + std::string str; + in >> str; + const std::ios_base::fmtflags basefield = in.flags() & std::ios_base::basefield; + unsigned radix = (basefield == std::ios_base::dec) ? 0 : ( + (basefield == std::ios_base::oct) ? 8 : ( + (basefield == std::ios_base::hex) ? 
16 : 0)); + op = ap_int_base<_AP_W, _AP_S>(str.c_str(), radix); + return in; +} +#endif // ifndef __SYNTHESIS__ +#endif // ifndef AP_AUTOCC + +/* Operators with another ap_int_base. + * ---------------------------------------------------------------- + */ +#define OP_BIN_AP(Sym, Rty) \ + template \ + INLINE \ + typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, _AP_S2>::Rty \ + operator Sym(const ap_int_base<_AP_W, _AP_S>& op, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + typename ap_int_base<_AP_W, _AP_S>::template RType< \ + _AP_W2, _AP_S2>::Rty##_base lhs(op); \ + typename ap_int_base<_AP_W, _AP_S>::template RType< \ + _AP_W2, _AP_S2>::Rty##_base rhs(op2); \ + typename ap_int_base<_AP_W, _AP_S>::template RType< \ + _AP_W2, _AP_S2>::Rty##_base ret; \ + ret.V = lhs.V Sym rhs.V; \ + return ret; \ + } + +OP_BIN_AP(*, mult) +OP_BIN_AP(+, plus) +OP_BIN_AP(-, minus) +OP_BIN_AP(&, logic) +OP_BIN_AP(|, logic) +OP_BIN_AP(^, logic) + +#define OP_BIN_AP2(Sym, Rty) \ + template \ + INLINE \ + typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, _AP_S2>::Rty \ + operator Sym(const ap_int_base<_AP_W, _AP_S>& op, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + typename ap_int_base<_AP_W, _AP_S>::template RType< \ + _AP_W2, _AP_S2>::Rty##_base ret; \ + ret.V = op.V Sym op2.V; \ + return ret; \ + } + +OP_BIN_AP2(/, div) +OP_BIN_AP2(%, mod) + +// shift operators are defined inside class. +// compound assignment operators are defined inside class. + +/* Operators with a pointer type. + * ---------------------------------------------------------------- + * char a[100]; + * char* ptr = a; + * ap_int<2> n = 3; + * char* ptr2 = ptr + n*2; + * avoid ambiguous errors. 
+ */ +#define OP_BIN_WITH_PTR(BIN_OP) \ + template \ + INLINE PTR_TYPE* operator BIN_OP(PTR_TYPE* i_op, \ + const ap_int_base<_AP_W, _AP_S>& op) { \ + ap_slong op2 = op.to_int64(); /* Not all implementation */ \ + return i_op BIN_OP op2; \ + } \ + template \ + INLINE PTR_TYPE* operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, \ + PTR_TYPE* i_op) { \ + ap_slong op2 = op.to_int64(); /* Not all implementation */ \ + return op2 BIN_OP i_op; \ + } + +OP_BIN_WITH_PTR(+) +OP_BIN_WITH_PTR(-) + +/* Operators with a native floating point types. + * ---------------------------------------------------------------- + */ +// float OP ap_int +// when ap_int's width > 64, then trunc ap_int to ap_int<64> +#define OP_BIN_WITH_FLOAT(BIN_OP, C_TYPE) \ + template \ + INLINE C_TYPE operator BIN_OP(C_TYPE i_op, \ + const ap_int_base<_AP_W, _AP_S>& op) { \ + typename ap_int_base<_AP_W, _AP_S>::RetType op2 = op; \ + return i_op BIN_OP op2; \ + } \ + template \ + INLINE C_TYPE operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, \ + C_TYPE i_op) { \ + typename ap_int_base<_AP_W, _AP_S>::RetType op2 = op; \ + return op2 BIN_OP i_op; \ + } + +#define ALL_OP_WITH_FLOAT(C_TYPE) \ + OP_BIN_WITH_FLOAT(*, C_TYPE) \ + OP_BIN_WITH_FLOAT(/, C_TYPE) \ + OP_BIN_WITH_FLOAT(+, C_TYPE) \ + OP_BIN_WITH_FLOAT(-, C_TYPE) + +#if _AP_ENABLE_HALF_ == 1 +ALL_OP_WITH_FLOAT(half) +#endif +ALL_OP_WITH_FLOAT(float) +ALL_OP_WITH_FLOAT(double) + +// TODO no shift? + +/* Operators with a native integral types. + * ---------------------------------------------------------------- + */ +// arithmetic and bitwise operators. 
+#define OP_BIN_WITH_INT(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ + template \ + INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(C_TYPE i_op, const ap_int_base<_AP_W, _AP_S>& op) { \ + return ap_int_base<_AP_W2, _AP_S2>(i_op) BIN_OP(op); \ + } \ + template \ + INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, C_TYPE i_op) { \ + return op BIN_OP ap_int_base<_AP_W2, _AP_S2>(i_op); \ + } + +#define ALL_OP_BIN_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ + OP_BIN_WITH_INT(*, C_TYPE, _AP_W2, _AP_S2, mult) \ + OP_BIN_WITH_INT(+, C_TYPE, _AP_W2, _AP_S2, plus) \ + OP_BIN_WITH_INT(-, C_TYPE, _AP_W2, _AP_S2, minus) \ + OP_BIN_WITH_INT(/, C_TYPE, _AP_W2, _AP_S2, div) \ + OP_BIN_WITH_INT(%, C_TYPE, _AP_W2, _AP_S2, mod) \ + OP_BIN_WITH_INT(&, C_TYPE, _AP_W2, _AP_S2, logic) \ + OP_BIN_WITH_INT(|, C_TYPE, _AP_W2, _AP_S2, logic) \ + OP_BIN_WITH_INT(^, C_TYPE, _AP_W2, _AP_S2, logic) + +ALL_OP_BIN_WITH_INT(bool, 1, false) +ALL_OP_BIN_WITH_INT(char, 8, CHAR_IS_SIGNED) +ALL_OP_BIN_WITH_INT(signed char, 8, true) +ALL_OP_BIN_WITH_INT(unsigned char, 8, false) +ALL_OP_BIN_WITH_INT(short, _AP_SIZE_short, true) +ALL_OP_BIN_WITH_INT(unsigned short, _AP_SIZE_short, false) +ALL_OP_BIN_WITH_INT(int, _AP_SIZE_int, true) +ALL_OP_BIN_WITH_INT(unsigned int, _AP_SIZE_int, false) +ALL_OP_BIN_WITH_INT(long, _AP_SIZE_long, true) +ALL_OP_BIN_WITH_INT(unsigned long, _AP_SIZE_long, false) +ALL_OP_BIN_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) +ALL_OP_BIN_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef OP_BIN_WITH_INT +#undef ALL_OP_BIN_WITH_INT + +// shift operators. +#define ALL_OP_SHIFT_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator<<( \ + const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ + ap_int_base<_AP_W, _AP_S> r; \ + if (_AP_S2) \ + r.V = op2 >= 0 ? 
(op.V << op2) : (op.V >> (-op2)); \ + else \ + r.V = op.V << op2; \ + return r; \ + } \ + template \ + INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator>>( \ + const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ + ap_int_base<_AP_W, _AP_S> r; \ + if (_AP_S2) \ + r.V = op2 >= 0 ? (op.V >> op2) : (op.V << (-op2)); \ + else \ + r.V = op.V >> op2; \ + return r; \ + } + +ALL_OP_SHIFT_WITH_INT(char, 8, CHAR_IS_SIGNED) +ALL_OP_SHIFT_WITH_INT(signed char, 8, true) +ALL_OP_SHIFT_WITH_INT(short, _AP_SIZE_short, true) +ALL_OP_SHIFT_WITH_INT(int, _AP_SIZE_int, true) +ALL_OP_SHIFT_WITH_INT(long, _AP_SIZE_long, true) +ALL_OP_SHIFT_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) + +#undef ALL_OP_SHIFT_WITH_INT + +#define ALL_OP_SHIFT_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator<<( \ + const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ + ap_int_base<_AP_W, _AP_S> r; \ + r.V = op.V << op2; \ + return r; \ + } \ + template \ + INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator>>( \ + const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ + ap_int_base<_AP_W, _AP_S> r; \ + r.V = op.V >> op2; \ + return r; \ + } +ALL_OP_SHIFT_WITH_INT(bool, 1, false) +ALL_OP_SHIFT_WITH_INT(unsigned char, 8, false) +ALL_OP_SHIFT_WITH_INT(unsigned short, _AP_SIZE_short, false) +ALL_OP_SHIFT_WITH_INT(unsigned int, _AP_SIZE_int, false) +ALL_OP_SHIFT_WITH_INT(unsigned long, _AP_SIZE_long, false) +ALL_OP_SHIFT_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef ALL_OP_SHIFT_WITH_INT + +// compound assign operators. 
+#define OP_ASSIGN_WITH_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE ap_int_base<_AP_W, _AP_S>& operator ASSIGN_OP( \ + ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ + return op ASSIGN_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ + } + +// TODO int a; ap_int<16> b; a += b; + +#define ALL_OP_ASSIGN_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(+=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(-=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(*=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(/=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(%=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(&=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(|=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(^=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(>>=, C_TYPE, _AP_W2, _AP_S2) \ + OP_ASSIGN_WITH_INT(<<=, C_TYPE, _AP_W2, _AP_S2) + +ALL_OP_ASSIGN_WITH_INT(bool, 1, false) +ALL_OP_ASSIGN_WITH_INT(char, 8, CHAR_IS_SIGNED) +ALL_OP_ASSIGN_WITH_INT(signed char, 8, true) +ALL_OP_ASSIGN_WITH_INT(unsigned char, 8, false) +ALL_OP_ASSIGN_WITH_INT(short, _AP_SIZE_short, true) +ALL_OP_ASSIGN_WITH_INT(unsigned short, _AP_SIZE_short, false) +ALL_OP_ASSIGN_WITH_INT(int, _AP_SIZE_int, true) +ALL_OP_ASSIGN_WITH_INT(unsigned int, _AP_SIZE_int, false) +ALL_OP_ASSIGN_WITH_INT(long, _AP_SIZE_long, true) +ALL_OP_ASSIGN_WITH_INT(unsigned long, _AP_SIZE_long, false) +ALL_OP_ASSIGN_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) +ALL_OP_ASSIGN_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef OP_ASSIGN_WITH_INT +#undef ALL_OP_ASSIGN_WITH_INT + +// equality and relational operators. 
+#define OP_REL_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE bool operator REL_OP(C_TYPE i_op, \ + const ap_int_base<_AP_W, _AP_S>& op) { \ + return ap_int_base<_AP_W2, _AP_S2>(i_op) REL_OP op; \ + } \ + template \ + INLINE bool operator REL_OP(const ap_int_base<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return op REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ + } + +#define ALL_OP_REL_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ + OP_REL_WITH_INT(>, C_TYPE, _AP_W2, _AP_S2) \ + OP_REL_WITH_INT(<, C_TYPE, _AP_W2, _AP_S2) \ + OP_REL_WITH_INT(>=, C_TYPE, _AP_W2, _AP_S2) \ + OP_REL_WITH_INT(<=, C_TYPE, _AP_W2, _AP_S2) \ + OP_REL_WITH_INT(==, C_TYPE, _AP_W2, _AP_S2) \ + OP_REL_WITH_INT(!=, C_TYPE, _AP_W2, _AP_S2) + +ALL_OP_REL_WITH_INT(bool, 1, false) +ALL_OP_REL_WITH_INT(char, 8, CHAR_IS_SIGNED) +ALL_OP_REL_WITH_INT(signed char, 8, true) +ALL_OP_REL_WITH_INT(unsigned char, 8, false) +ALL_OP_REL_WITH_INT(short, _AP_SIZE_short, true) +ALL_OP_REL_WITH_INT(unsigned short, _AP_SIZE_short, false) +ALL_OP_REL_WITH_INT(int, _AP_SIZE_int, true) +ALL_OP_REL_WITH_INT(unsigned int, _AP_SIZE_int, false) +ALL_OP_REL_WITH_INT(long, _AP_SIZE_long, true) +ALL_OP_REL_WITH_INT(unsigned long, _AP_SIZE_long, false) +ALL_OP_REL_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) +ALL_OP_REL_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef OP_REL_WITH_INT +#undef ALL_OP_BIN_WITH_INT + +#define OP_REL_WITH_DOUBLE_OR_FLOAT(Sym) \ + template \ + INLINE bool operator Sym(const ap_int_base<_AP_W, _AP_S>& op1, \ + double op2) { \ + return op1.to_double() Sym op2 ; \ + } \ + template \ + INLINE bool operator Sym(double op1, \ + const ap_int_base<_AP_W, _AP_S>& op2) { \ + return op1 Sym op2.to_double() ; \ + } \ + template \ + INLINE bool operator Sym(const ap_int_base<_AP_W, _AP_S>& op1, \ + float op2) { \ + return op1.to_double() Sym op2 ; \ + } \ + template \ + INLINE bool operator Sym(float op1, \ + const ap_int_base<_AP_W, _AP_S>& op2) { \ + return op1 Sym op2.to_double() ; \ + } + 
OP_REL_WITH_DOUBLE_OR_FLOAT(>) + OP_REL_WITH_DOUBLE_OR_FLOAT(<) + OP_REL_WITH_DOUBLE_OR_FLOAT(>=) + OP_REL_WITH_DOUBLE_OR_FLOAT(<=) + OP_REL_WITH_DOUBLE_OR_FLOAT(==) + OP_REL_WITH_DOUBLE_OR_FLOAT(!=) + +#undef OP_REL_WITH_DOUBLE_OR_FLOAT + + +/* Operators with ap_bit_ref. + * ------------------------------------------------------------ + */ +// arithmetic, bitwise and shift operators. +#define OP_BIN_WITH_RANGE(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(const ap_range_ref<_AP_W1, _AP_S1>& op1, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + return ap_int_base<_AP_W1, false>(op1) BIN_OP op2; \ + } \ + template \ + INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ + const ap_range_ref<_AP_W2, _AP_S2>& op2) { \ + return op1 BIN_OP ap_int_base<_AP_W2, false>(op2); \ + } + +OP_BIN_WITH_RANGE(+, plus) +OP_BIN_WITH_RANGE(-, minus) +OP_BIN_WITH_RANGE(*, mult) +OP_BIN_WITH_RANGE(/, div) +OP_BIN_WITH_RANGE(%, mod) +OP_BIN_WITH_RANGE(&, logic) +OP_BIN_WITH_RANGE(|, logic) +OP_BIN_WITH_RANGE(^, logic) +OP_BIN_WITH_RANGE(>>, arg1) +OP_BIN_WITH_RANGE(<<, arg1) + +#undef OP_BIN_WITH_RANGE + +// compound assignment operators. 
+#define OP_ASSIGN_WITH_RANGE(ASSIGN_OP) \ + template \ + INLINE ap_int_base<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + ap_int_base<_AP_W1, _AP_S1>& op1, ap_range_ref<_AP_W2, _AP_S2>& op2) { \ + return op1 ASSIGN_OP ap_int_base<_AP_W2, false>(op2); \ + } \ + template \ + INLINE ap_range_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + ap_range_ref<_AP_W1, _AP_S1>& op1, ap_int_base<_AP_W2, _AP_S2>& op2) { \ + ap_int_base<_AP_W1, false> tmp(op1); \ + tmp ASSIGN_OP op2; \ + op1 = tmp; \ + return op1; \ + } + +OP_ASSIGN_WITH_RANGE(+=) +OP_ASSIGN_WITH_RANGE(-=) +OP_ASSIGN_WITH_RANGE(*=) +OP_ASSIGN_WITH_RANGE(/=) +OP_ASSIGN_WITH_RANGE(%=) +OP_ASSIGN_WITH_RANGE(&=) +OP_ASSIGN_WITH_RANGE(|=) +OP_ASSIGN_WITH_RANGE(^=) +OP_ASSIGN_WITH_RANGE(>>=) +OP_ASSIGN_WITH_RANGE(<<=) + +#undef OP_ASSIGN_WITH_RANGE + +// equality and relational operators +#define OP_REL_WITH_RANGE(REL_OP) \ + template \ + INLINE bool operator REL_OP(const ap_range_ref<_AP_W1, _AP_S1>& op1, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + return ap_int_base<_AP_W1, false>(op1).operator REL_OP(op2); \ + } \ + template \ + INLINE bool operator REL_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ + const ap_range_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator REL_OP(op2.operator ap_int_base<_AP_W2, false>()); \ + } + +OP_REL_WITH_RANGE(==) +OP_REL_WITH_RANGE(!=) +OP_REL_WITH_RANGE(>) +OP_REL_WITH_RANGE(>=) +OP_REL_WITH_RANGE(<) +OP_REL_WITH_RANGE(<=) + +#undef OP_REL_WITH_RANGE + +/* Operators with ap_bit_ref. + * ------------------------------------------------------------ + */ +// arithmetic, bitwise and shift operators. 
+#define OP_BIN_WITH_BIT(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<1, false>::RTYPE \ + operator BIN_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ + const ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ + return op1 BIN_OP ap_int_base<1, false>(op2); \ + } \ + template \ + INLINE typename ap_int_base<1, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP(const ap_bit_ref<_AP_W1, _AP_S1>& op1, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + return ap_int_base<1, false>(op1) BIN_OP op2; \ + } + +OP_BIN_WITH_BIT(+, plus) +OP_BIN_WITH_BIT(-, minus) +OP_BIN_WITH_BIT(*, mult) +OP_BIN_WITH_BIT(/, div) +OP_BIN_WITH_BIT(%, mod) +OP_BIN_WITH_BIT(&, logic) +OP_BIN_WITH_BIT(|, logic) +OP_BIN_WITH_BIT(^, logic) +OP_BIN_WITH_BIT(>>, arg1) +OP_BIN_WITH_BIT(<<, arg1) + +#undef OP_BIN_WITH_BIT + +// compound assignment operators. +#define OP_ASSIGN_WITH_BIT(ASSIGN_OP) \ + template \ + INLINE ap_int_base<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + ap_int_base<_AP_W1, _AP_S1>& op1, ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ + return op1 ASSIGN_OP ap_int_base<1, false>(op2); \ + } \ + template \ + INLINE ap_bit_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + ap_bit_ref<_AP_W1, _AP_S1>& op1, ap_int_base<_AP_W2, _AP_S2>& op2) { \ + ap_int_base<1, false> tmp(op1); \ + tmp ASSIGN_OP op2; \ + op1 = tmp; \ + return op1; \ + } + +OP_ASSIGN_WITH_BIT(+=) +OP_ASSIGN_WITH_BIT(-=) +OP_ASSIGN_WITH_BIT(*=) +OP_ASSIGN_WITH_BIT(/=) +OP_ASSIGN_WITH_BIT(%=) +OP_ASSIGN_WITH_BIT(&=) +OP_ASSIGN_WITH_BIT(|=) +OP_ASSIGN_WITH_BIT(^=) +OP_ASSIGN_WITH_BIT(>>=) +OP_ASSIGN_WITH_BIT(<<=) + +#undef OP_ASSIGN_WITH_BIT + +// equality and relational operators. 
+#define OP_REL_WITH_BIT(REL_OP) \ + template \ + INLINE bool operator REL_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ + const ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ + return op1 REL_OP ap_int_base<1, false>(op2); \ + } \ + template \ + INLINE bool operator REL_OP(const ap_bit_ref<_AP_W1, _AP_S1>& op1, \ + const ap_int_base<_AP_W2, _AP_S2>& op2) { \ + return ap_int_base<1, false>(op1) REL_OP op2; \ + } + +OP_REL_WITH_BIT(==) +OP_REL_WITH_BIT(!=) +OP_REL_WITH_BIT(>) +OP_REL_WITH_BIT(>=) +OP_REL_WITH_BIT(<) +OP_REL_WITH_BIT(<=) + +#undef OP_REL_WITH_BIT + + +/* Operators with ap_concat_ref. + * ------------------------------------------------------------ + */ +// arithmetic, bitwise and shift operators. +// bitwise operators are defined in struct. +// TODO specify whether to define arithmetic and bitwise operators. +#if 0 +#define OP_BIN_WITH_CONCAT(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_int_base<_AP_W3, _AP_S3>::template RType<_AP_W1 + _AP_W2, \ + false>::RTYPE \ + operator BIN_OP(const ap_int_base<_AP_W3, _AP_S3>& op1, \ + const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ + /* convert ap_concat_ref to ap_int_base */ \ + return op1 BIN_OP op2.get(); \ + } \ + template \ + INLINE typename ap_int_base<_AP_W1 + _AP_W2, \ + false>::template RType<_AP_W3, _AP_S3>::RTYPE \ + operator BIN_OP(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ + const ap_int_base<_AP_W3, _AP_S3>& op2) { \ + /* convert ap_concat_ref to ap_int_base */ \ + return op1.get() BIN_OP op2; \ + } + +OP_BIN_WITH_CONCAT(+, plus) +OP_BIN_WITH_CONCAT(-, minus) +OP_BIN_WITH_CONCAT(*, mult) +OP_BIN_WITH_CONCAT(/, div) +OP_BIN_WITH_CONCAT(%, mod) +OP_BIN_WITH_CONCAT(&, logic) +OP_BIN_WITH_CONCAT(|, logic) +OP_BIN_WITH_CONCAT(^, logic) +OP_BIN_WITH_CONCAT(>>, arg1) +OP_BIN_WITH_CONCAT(<<, arg1) + +#undef OP_BIN_WITH_CONCAT + +// compound assignment operators. 
+#define OP_ASSIGN_WITH_CONCAT(ASSIGN_OP) \ + template \ + INLINE typename ap_int_base<_AP_W3, _AP_S3>::template RType<_AP_W1 + _AP_W2, \ + false>::RTYPE \ + operator ASSIGN_OP( \ + const ap_int_base<_AP_W3, _AP_S3>& op1, \ + const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ + /* convert ap_concat_ref to ap_int_base */ \ + return op1 ASSIGN_OP op2.get(); \ + } \ + template \ + INLINE typename ap_int_base<_AP_W1 + _AP_W2, \ + false>::template RType<_AP_W3, _AP_S3>::RTYPE \ + operator ASSIGN_OP(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ + const ap_int_base<_AP_W3, _AP_S3>& op2) { \ + /* convert ap_concat_ref to ap_int_base */ \ + ap_int_base<_AP_W1 + _AP_W2, false> tmp = op1.get(); \ + tmp ASSIGN_OP op2; \ + op1 = tmp; \ + return op1; \ + } + +OP_ASSIGN_WITH_CONCAT(+=) +OP_ASSIGN_WITH_CONCAT(-=) +OP_ASSIGN_WITH_CONCAT(*=) +OP_ASSIGN_WITH_CONCAT(/=) +OP_ASSIGN_WITH_CONCAT(%=) +OP_ASSIGN_WITH_CONCAT(&=) +OP_ASSIGN_WITH_CONCAT(|=) +OP_ASSIGN_WITH_CONCAT(^=) +OP_ASSIGN_WITH_CONCAT(>>=) +OP_ASSIGN_WITH_CONCAT(<<=) + +#undef OP_ASSIGN_WITH_CONCAT +#endif + +// equality and relational operators. 
+#define OP_REL_WITH_CONCAT(REL_OP) \ + template \ + INLINE bool operator REL_OP( \ + const ap_int_base<_AP_W3, _AP_S3>& op1, \ + const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ + /* convert ap_concat_ref to ap_int_base */ \ + return op1 REL_OP op2.get(); \ + } \ + template \ + INLINE bool operator REL_OP( \ + const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ + const ap_int_base<_AP_W3, _AP_S3>& op2) { \ + /* convert ap_concat_ref to ap_int_base */ \ + return op1.get() REL_OP op2; \ + } + +OP_REL_WITH_CONCAT(==) +OP_REL_WITH_CONCAT(!=) +OP_REL_WITH_CONCAT(>) +OP_REL_WITH_CONCAT(>=) +OP_REL_WITH_CONCAT(<) +OP_REL_WITH_CONCAT(<=) + +#undef OP_REL_WITH_CONCAT + +#endif // ifndef __cplusplus +#endif // ifndef __AP_INT_BASE_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int_ref.h b/TrigScint/include/TrigScint/ap_int_ref.h new file mode 100644 index 000000000..421f09fda --- /dev/null +++ b/TrigScint/include/TrigScint/ap_int_ref.h @@ -0,0 +1,1346 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_INT_REF_H__ +#define __AP_INT_REF_H__ + +#ifndef __AP_INT_H__ +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." +#endif + +#ifndef __cplusplus +#error "C++ is required to include this header file" + +#else + +#ifndef __SYNTHESIS__ +#include +#endif + +/* Concatination reference. 
+ ---------------------------------------------------------------- +*/ +template +struct ap_concat_ref { + enum { + _AP_WR = _AP_W1 + _AP_W2, + }; + + _AP_T1& mbv1; + _AP_T2& mbv2; + + INLINE ap_concat_ref(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& ref) + : mbv1(ref.mbv1), mbv2(ref.mbv2) {} + + INLINE ap_concat_ref(_AP_T1& bv1, _AP_T2& bv2) : mbv1(bv1), mbv2(bv2) {} + + template + INLINE ap_concat_ref& operator=(const ap_int_base<_AP_W3, _AP_S3>& val) { + ap_int_base<_AP_W1 + _AP_W2, false> vval(val); + int W_ref1 = mbv1.length(); + int W_ref2 = mbv2.length(); + ap_int_base<_AP_W1, false> Part1; + Part1.V = _AP_ROOT_op_get_range(vval.V, W_ref2, W_ref1 + W_ref2 - 1); + mbv1.set(Part1); + ap_int_base<_AP_W2, false> Part2; + Part2.V = _AP_ROOT_op_get_range(vval.V, 0, W_ref2 - 1); + mbv2.set(Part2); + return *this; + } + + // assign op from hls supported C integral types. + // FIXME disabled to support legacy code directly assign from sc_signal + //template + //INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, + // ap_concat_ref&>::type + //operator=(T val) { + // ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); + // return operator=(tmpVal); + //} +#define ASSIGN_WITH_CTYPE(_Tp) \ + INLINE ap_concat_ref& operator=(_Tp val) { \ + ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); \ + return operator=(tmpVal); \ + } + + ASSIGN_WITH_CTYPE(bool) + ASSIGN_WITH_CTYPE(char) + ASSIGN_WITH_CTYPE(signed char) + ASSIGN_WITH_CTYPE(unsigned char) + ASSIGN_WITH_CTYPE(short) + ASSIGN_WITH_CTYPE(unsigned short) + ASSIGN_WITH_CTYPE(int) + ASSIGN_WITH_CTYPE(unsigned int) + ASSIGN_WITH_CTYPE(long) + ASSIGN_WITH_CTYPE(unsigned long) + ASSIGN_WITH_CTYPE(ap_slong) + ASSIGN_WITH_CTYPE(ap_ulong) +#if _AP_ENABLE_HALF_ == 1 + ASSIGN_WITH_CTYPE(half) +#endif + ASSIGN_WITH_CTYPE(float) + ASSIGN_WITH_CTYPE(double) + +#undef ASSIGN_WITH_CTYPE + + // Be explicit to prevent it from being deleted, as field d_bv + // is of reference type. 
+ INLINE ap_concat_ref& operator=( + const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& val) { + ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); + return operator=(tmpVal); + } + + template + INLINE ap_concat_ref& operator=( + const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { + ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); + return operator=(tmpVal); + } + + template + INLINE ap_concat_ref& operator=(const ap_bit_ref<_AP_W3, _AP_S3>& val) { + ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); + return operator=(tmpVal); + } + template + INLINE ap_concat_ref& operator=(const ap_range_ref<_AP_W3, _AP_S3>& val) { + ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); + return operator=(tmpVal); + } + + template + INLINE ap_concat_ref& operator=( + const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { + return operator=((const ap_int_base<_AP_W3, false>)(val)); + } + + template + INLINE ap_concat_ref& operator=( + const ap_fixed_base<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& + val) { + return operator=(val.to_ap_int_base()); + } + + template + INLINE ap_concat_ref& operator=( + const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { + return operator=((ap_ulong)(bool)(val)); + } + + INLINE operator ap_int_base<_AP_WR, false>() const { return get(); } + + INLINE operator ap_ulong() const { return get().to_uint64(); } + + template + INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, + ap_range_ref<_AP_W3, _AP_S3> > + operator,(const ap_range_ref<_AP_W3, _AP_S3> &a2) { + return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, + ap_range_ref<_AP_W3, _AP_S3> >( + *this, const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > + operator,(ap_int_base<_AP_W3, _AP_S3> &a2) { + return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, + ap_int_base<_AP_W3, _AP_S3> >(*this, a2); + } + + template + INLINE + ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, 
_AP_S3> > + operator,(volatile ap_int_base<_AP_W3, _AP_S3> &a2) { + return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, + ap_int_base<_AP_W3, _AP_S3> >( + *this, const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > + operator,(const ap_int_base<_AP_W3, _AP_S3> &a2) { + return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, + ap_int_base<_AP_W3, _AP_S3> >( + *this, const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > + operator,(const volatile ap_int_base<_AP_W3, _AP_S3> &a2) { + // FIXME op's life does not seem long enough + ap_int_base<_AP_W3, _AP_S3> op(a2); + return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, + ap_int_base<_AP_W3, _AP_S3> >( + *this, const_cast&>(op)); + } + + template + INLINE ap_concat_ref<_AP_WR, ap_concat_ref, 1, ap_bit_ref<_AP_W3, _AP_S3> > + operator,(const ap_bit_ref<_AP_W3, _AP_S3> &a2) { + return ap_concat_ref<_AP_WR, ap_concat_ref, 1, ap_bit_ref<_AP_W3, _AP_S3> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, + ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> > + operator,(const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> &a2) { + return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, + ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref< + _AP_WR, ap_concat_ref, _AP_W3, + af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > + operator,( + const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> &a2) { + return ap_concat_ref< + _AP_WR, ap_concat_ref, _AP_W3, + af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( + *this, + const_cast< + af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_WR, ap_concat_ref, 1, + af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > + 
operator,(const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> + &a2) { + return ap_concat_ref< + _AP_WR, ap_concat_ref, 1, + af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( + *this, + const_cast&>( + a2)); + } + + template + INLINE ap_int_base operator&( + const ap_int_base<_AP_W3, _AP_S3>& a2) { + return get() & a2; + } + + template + INLINE ap_int_base operator|( + const ap_int_base<_AP_W3, _AP_S3>& a2) { + return get() | a2; + } + + template + INLINE ap_int_base operator^( + const ap_int_base<_AP_W3, _AP_S3>& a2) { + return get() ^ a2; + } + +#if 0 + template + INLINE ap_int_base slice() { + ap_int_base<_AP_WR, false> bv = get(); + return bv.slice(); + } +#endif + + INLINE ap_int_base<_AP_WR, false> get() const { + ap_int_base<_AP_WR, false> tmpVal(0); + int W_ref1 = mbv1.length(); + int W_ref2 = mbv2.length(); + ap_int_base<_AP_W2, false> v2(mbv2); + ap_int_base<_AP_W1, false> v1(mbv1); + tmpVal.V = _AP_ROOT_op_set_range(tmpVal.V, 0, W_ref2 - 1, v2.V); + tmpVal.V = + _AP_ROOT_op_set_range(tmpVal.V, W_ref2, W_ref1 + W_ref2 - 1, v1.V); + return tmpVal; + } + + template + INLINE void set(const ap_int_base<_AP_W3, false>& val) { + ap_int_base<_AP_W1 + _AP_W2, false> vval(val); + int W_ref1 = mbv1.length(); + int W_ref2 = mbv2.length(); + ap_int_base<_AP_W1, false> tmpVal1; + tmpVal1.V = _AP_ROOT_op_get_range(vval.V, W_ref2, W_ref1 + W_ref2 - 1); + mbv1.set(tmpVal1); + ap_int_base<_AP_W2, false> tmpVal2; + tmpVal2.V = _AP_ROOT_op_get_range(vval.V, 0, W_ref2 - 1); + mbv2.set(tmpVal2); + } + + INLINE int length() const { return mbv1.length() + mbv2.length(); } +}; // struct ap_concat_ref + +/* Range (slice) reference. + ---------------------------------------------------------------- +*/ +template +struct ap_range_ref { + // struct ssdm_int or its sim model. + // TODO make it possible to reference to ap_fixed_base/ap_fixed/ap_ufixed + // and then we can retire af_range_ref. 
+ typedef ap_int_base<_AP_W, _AP_S> ref_type; + ref_type& d_bv; + int l_index; + int h_index; + + public: + INLINE ap_range_ref(const ap_range_ref<_AP_W, _AP_S>& ref) + : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} + + INLINE ap_range_ref(ref_type* bv, int h, int l) + : d_bv(*bv), l_index(l), h_index(h) {} + + INLINE ap_range_ref(const ref_type* bv, int h, int l) + : d_bv(*const_cast(bv)), l_index(l), h_index(h) {} + + INLINE operator ap_int_base<_AP_W, false>() const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret; + } + + INLINE operator ap_ulong() const { return to_uint64(); } + + /// @name assign operators + // @{ + + // FIXME disabled to work-around lagacy code assigning from sc_signal, + // which dependes on implicit type conversion. + // + // /// assign from hls supported C integral types. + // template + // INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, + // ap_range_ref&>::type + // operator=(T val) { + // ap_int_base<_AP_W, false> tmp(val); + // d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); + // return *this; + // } +#define ASSIGN_WITH_CTYPE(_Tp) \ + INLINE ap_range_ref& operator=(_Tp val) { \ + ap_int_base<_AP_W, false> tmp(val); \ + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); \ + return *this; \ + } + + ASSIGN_WITH_CTYPE(bool) + ASSIGN_WITH_CTYPE(char) + ASSIGN_WITH_CTYPE(signed char) + ASSIGN_WITH_CTYPE(unsigned char) + ASSIGN_WITH_CTYPE(short) + ASSIGN_WITH_CTYPE(unsigned short) + ASSIGN_WITH_CTYPE(int) + ASSIGN_WITH_CTYPE(unsigned int) + ASSIGN_WITH_CTYPE(long) + ASSIGN_WITH_CTYPE(unsigned long) + ASSIGN_WITH_CTYPE(ap_slong) + ASSIGN_WITH_CTYPE(ap_ulong) +#if _AP_ENABLE_HALF_ == 1 + ASSIGN_WITH_CTYPE(half) +#endif + ASSIGN_WITH_CTYPE(float) + ASSIGN_WITH_CTYPE(double) + +#undef ASSIGN_WITH_CTYPE + + /// assign using string. XXX crucial for cosim. 
+ INLINE ap_range_ref& operator=(const char* val) { + const ap_int_base<_AP_W, false> tmp(val); // XXX figure out radix + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); + return *this; + } + + /// assign from ap_int_base. + template + INLINE ap_range_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { + ap_int_base<_AP_W, false> tmp(val); + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); + return *this; + } + + /// copy assign operator + // XXX Be explicit to prevent it from being deleted, as field d_bv + // is of reference type. + INLINE ap_range_ref& operator=(const ap_range_ref& val) { + return operator=((const ap_int_base<_AP_W, false>)val); + } + + /// assign from range reference to ap_int_base. + template + INLINE ap_range_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { + return operator=((const ap_int_base<_AP_W2, false>)val); + } + + /// assign from bit reference to ap_int_base. + template + INLINE ap_range_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { + return operator=((ap_ulong)(bool)(val)); + } + + /// assign from ap_fixed_base. + template + INLINE ap_range_ref& operator=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& + val) { + return operator=(val.to_ap_int_base()); + } + + /// assign from range reference to ap_fixed_base. + template + INLINE ap_range_ref& operator=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=((const ap_int_base<_AP_W2, false>)val); + } + + /// assign from bit reference to ap_fixed_base. + template + INLINE ap_range_ref& operator=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=((ap_ulong)(bool)(val)); + } + + /// assign from compound reference. 
+ template + INLINE ap_range_ref& operator=( + const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { + return operator=((const ap_int_base<_AP_W2 + _AP_W3, false>)(val)); + } + // @} + + template + INLINE + ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > + operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, + ap_range_ref<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >(*this, a2); + } + + INLINE + ap_concat_ref<_AP_W, ap_range_ref, _AP_W, ap_int_base<_AP_W, _AP_S> > + operator,(ap_int_base<_AP_W, _AP_S>& a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W, + ap_int_base<_AP_W, _AP_S> >(*this, a2); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(volatile ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(const volatile ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, + ap_int_base<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<_AP_W, ap_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > + operator,(const ap_bit_ref<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + 
template + INLINE ap_concat_ref<_AP_W, ap_range_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > + operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { + return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref< + _AP_W, ap_range_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> a2) { + return ap_concat_ref< + _AP_W, ap_range_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast< + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); + } + + template + INLINE + ap_concat_ref<_AP_W, ap_range_ref, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> + &a2) { + return ap_concat_ref< + _AP_W, ap_range_ref, 1, + af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast&>( + a2)); + } + + template + INLINE bool operator==(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> hop(op2); + return lop == hop; + } + + template + INLINE bool operator!=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return !(operator==(op2)); + } + + template + INLINE bool operator<(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> hop(op2); + return lop < hop; + } + + template + INLINE bool operator<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + ap_int_base<_AP_W, false> lop(*this); + ap_int_base<_AP_W2, false> hop(op2); + return lop <= hop; + } + + template + INLINE bool operator>(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + return !(operator<=(op2)); + } + + template + INLINE bool operator>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { + 
return !(operator<(op2)); + } + + template + INLINE ap_range_ref<_AP_W, _AP_S>& operator|=( + const ap_range_ref<_AP_W2, _AP_S2>& op2) { + (this->d_bv).V |= (op2.d_bv).V; + return *this; + }; + + template + INLINE ap_range_ref<_AP_W, _AP_S>& operator|=( + const ap_int_base<_AP_W2, _AP_S2>& op2) { + (this->d_bv).V |= op2.V; + return *this; + }; + + template + INLINE ap_range_ref<_AP_W, _AP_S>& operator&=( + const ap_range_ref<_AP_W2, _AP_S2>& op2) { + (this->d_bv).V &= (op2.d_bv).V; + return *this; + }; + + template + INLINE ap_range_ref<_AP_W, _AP_S>& operator&=( + const ap_int_base<_AP_W2, _AP_S2>& op2) { + (this->d_bv).V &= op2.V; + return *this; + }; + + template + INLINE ap_range_ref<_AP_W, _AP_S>& operator^=( + const ap_range_ref<_AP_W2, _AP_S2>& op2) { + (this->d_bv).V ^= (op2.d_bv).V; + return *this; + }; + + template + INLINE ap_range_ref<_AP_W, _AP_S>& operator^=( + const ap_int_base<_AP_W2, _AP_S2>& op2) { + (this->d_bv).V ^= op2.V; + return *this; + }; + + INLINE ap_int_base<_AP_W, false> get() const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret; + } + + template + INLINE void set(const ap_int_base<_AP_W2, false>& val) { + d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); + } + + INLINE int length() const { + return h_index >= l_index ? 
h_index - l_index + 1 : l_index - h_index + 1; + } + + INLINE int to_int() const { + return (int)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE unsigned to_uint() const { + return (unsigned)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE long to_long() const { + return (long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE unsigned long to_ulong() const { + return (unsigned long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE ap_slong to_int64() const { + return (ap_slong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE ap_ulong to_uint64() const { + return (ap_ulong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); + } + + INLINE bool and_reduce() const { + bool ret = true; + bool reverse = l_index > h_index; + unsigned low = reverse ? h_index : l_index; + unsigned high = reverse ? l_index : h_index; + for (unsigned i = low; i != high; ++i) { +#ifdef __SYNTHESIS__ +#pragma HLS unroll +#endif + ret &= _AP_ROOT_op_get_bit(d_bv.V, i); + } + return ret; + } + + INLINE bool or_reduce() const { + bool ret = false; + bool reverse = l_index > h_index; + unsigned low = reverse ? h_index : l_index; + unsigned high = reverse ? l_index : h_index; + for (unsigned i = low; i != high; ++i) { +#ifdef __SYNTHESIS__ +#pragma HLS unroll +#endif + ret |= _AP_ROOT_op_get_bit(d_bv.V, i); + } + return ret; + } + + INLINE bool xor_reduce() const { + bool ret = false; + bool reverse = l_index > h_index; + unsigned low = reverse ? h_index : l_index; + unsigned high = reverse ? 
l_index : h_index; + for (unsigned i = low; i != high; ++i) { +#ifdef __SYNTHESIS__ +#pragma HLS unroll +#endif + ret ^= _AP_ROOT_op_get_bit(d_bv.V, i); + } + return ret; + } +#ifndef __SYNTHESIS__ + std::string to_string(signed char radix = 2) const { + ap_int_base<_AP_W, false> ret; + ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); + return ret.to_string(radix); + } +#else + // XXX HLS will delete this in synthesis + INLINE char* to_string(signed char radix = 2) const { + return 0; + } +#endif +}; // struct ap_range_ref + +// XXX apcc cannot handle global std::ios_base::Init() brought in by +#ifndef AP_AUTOCC +#ifndef __SYNTHESIS__ +template +INLINE std::ostream& operator<<(std::ostream& os, + const ap_range_ref<_AP_W, _AP_S>& x) { + std::ios_base::fmtflags ff = std::cout.flags(); + if (ff & std::cout.hex) { + os << x.to_string(16); // don't print sign + } else if (ff & std::cout.oct) { + os << x.to_string(8); // don't print sign + } else { + os << x.to_string(10); + } + return os; +} +#endif // ifndef __SYNTHESIS__ + +#ifndef __SYNTHESIS__ +template +INLINE std::istream& operator>>(std::istream& in, + ap_range_ref<_AP_W, _AP_S>& op) { + std::string str; + in >> str; + op = ap_int_base<_AP_W, _AP_S>(str.c_str()); + return in; +} +#endif // ifndef __SYNTHESIS__ +#endif // ifndef AP_AUTOCC + +/* Bit reference. + ---------------------------------------------------------------- +*/ +template +struct ap_bit_ref { + // struct ssdm_int or its sim model. + // TODO make it possible to reference to ap_fixed_base/ap_fixed/ap_ufixed + // and then we can retire af_bit_ref. 
+ typedef ap_int_base<_AP_W, _AP_S> ref_type; + ref_type& d_bv; + int d_index; + + public: + // copy ctor + INLINE ap_bit_ref(const ap_bit_ref<_AP_W, _AP_S>& ref) + : d_bv(ref.d_bv), d_index(ref.d_index) {} + + INLINE ap_bit_ref(ref_type* bv, int index = 0) : d_bv(*bv), d_index(index) {} + + INLINE ap_bit_ref(const ref_type* bv, int index = 0) + : d_bv(*const_cast(bv)), d_index(index) {} + + INLINE operator bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } + INLINE bool to_bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } + + // assign op from hls supported C integral types. + // FIXME disabled to support sc_signal. + // NOTE this used to be unsigned long long. + //template + //INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, + // ap_bit_ref&>::type + //operator=(T val) { + // d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); + // return *this; + //} +#define ASSIGN_WITH_CTYPE(_Tp) \ + INLINE ap_bit_ref& operator=(_Tp val) { \ + d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); \ + return *this; \ + } + + ASSIGN_WITH_CTYPE(bool) + ASSIGN_WITH_CTYPE(char) + ASSIGN_WITH_CTYPE(signed char) + ASSIGN_WITH_CTYPE(unsigned char) + ASSIGN_WITH_CTYPE(short) + ASSIGN_WITH_CTYPE(unsigned short) + ASSIGN_WITH_CTYPE(int) + ASSIGN_WITH_CTYPE(unsigned int) + ASSIGN_WITH_CTYPE(long) + ASSIGN_WITH_CTYPE(unsigned long) + ASSIGN_WITH_CTYPE(ap_slong) + ASSIGN_WITH_CTYPE(ap_ulong) + +#undef ASSIGN_WITH_CTYPE + +#define ASSIGN_WITH_CTYPE_FP(_Tp) \ + INLINE ap_bit_ref& operator=(_Tp val) { \ + bool tmp_val = val; \ + d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index,tmp_val); \ + return *this; \ + } + +#if _AP_ENABLE_HALF_ == 1 + ASSIGN_WITH_CTYPE_FP(half) +#endif + ASSIGN_WITH_CTYPE_FP(float) + ASSIGN_WITH_CTYPE_FP(double) + +#undef ASSIGN_WITH_CTYPE_FP + + + template + INLINE ap_bit_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { + return operator=((ap_ulong)(val.V != 0)); + } + + template + INLINE ap_bit_ref& operator=(const 
ap_range_ref<_AP_W2, _AP_S2>& val) { + return operator=((ap_int_base<_AP_W2, false>)val); + } + + // Be explicit to prevent it from being deleted, as field d_bv + // is of reference type. + INLINE ap_bit_ref& operator=(const ap_bit_ref& val) { + return operator=((ap_ulong)(bool)val); + } + + template + INLINE ap_bit_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { + return operator=((ap_ulong)(bool)val); + } + + template + INLINE ap_bit_ref& operator=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=((const ap_int_base<_AP_W2, false>)val); + } + + template + INLINE ap_bit_ref& operator=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=((ap_ulong)(bool)val); + } + + template + INLINE ap_bit_ref& operator=( + const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { + return operator=((const ap_int_base<_AP_W2 + _AP_W3, false>)val); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( + *this, a2); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(volatile ap_int_base<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { + ap_int_base<_AP_W2, _AP_S2> op(a2); + return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( + *this, const_cast&>(op)); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > + operator,(const volatile ap_int_base<_AP_W2, _AP_S2> &a2) { + ap_int_base<_AP_W2, _AP_S2> op(a2); + return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( + *this, 
const_cast&>(op)); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > + operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > operator,( + const ap_bit_ref<_AP_W2, _AP_S2> &a2) { + return ap_concat_ref<1, ap_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > + operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { + return ap_concat_ref<1, ap_bit_ref, _AP_W2 + _AP_W3, + ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( + *this, const_cast&>(a2)); + } + + template + INLINE ap_concat_ref< + 1, ap_bit_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > + operator,( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { + return ap_concat_ref< + 1, ap_bit_ref, _AP_W2, + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast< + af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); + } + + template + INLINE ap_concat_ref<1, ap_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, + _AP_Q2, _AP_O2, _AP_N2> > + operator,( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { + return ap_concat_ref<1, ap_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, + _AP_Q2, _AP_O2, _AP_N2> >( + *this, + const_cast&>( + a2)); + } + + template + INLINE bool operator==(const ap_bit_ref<_AP_W2, _AP_S2>& op) { + return get() == op.get(); + } + + template + INLINE bool operator!=(const ap_bit_ref<_AP_W2, _AP_S2>& op) { + return get() != op.get(); + } + + INLINE bool get() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } + + INLINE bool get() { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } + + template 
+ INLINE void set(const ap_int_base<_AP_W3, false>& val) { + operator=(val); + } + + INLINE bool operator~() const { + bool bit = _AP_ROOT_op_get_bit(d_bv.V, d_index); + return bit ? false : true; + } + + INLINE int length() const { return 1; } + +#ifndef __SYNTHESIS__ + std::string to_string() const { return get() ? "1" : "0"; } +#else + // XXX HLS will delete this in synthesis + INLINE char* to_string() const { return 0; } +#endif +}; // struct ap_bit_ref + +/* ap_range_ref with int. + * ------------------------------------------------------------ + */ +// equality and relational operators. +#define REF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE bool operator REL_OP(const ap_range_ref<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return ap_int_base<_AP_W, false>(op) \ + REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ + } \ + template \ + INLINE bool operator REL_OP(const ap_bit_ref<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return bool(op) REL_OP op2; \ + } \ + template \ + INLINE bool operator REL_OP(C_TYPE op2, \ + const ap_bit_ref<_AP_W, _AP_S>& op) { \ + return op2 REL_OP bool(op); \ + } \ + template \ + INLINE bool operator REL_OP( \ + const ap_concat_ref<_AP_W, _AP_T, _AP_W1, _AP_T1>& op, C_TYPE op2) { \ + return ap_int_base<_AP_W + _AP_W1, false>(op) \ + REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ + } + +// Make the line shorter than 5000 chars +#define REF_REL_WITH_INT_1(C_TYPE, _AP_WI, _AP_SI) \ + REF_REL_OP_WITH_INT(>, C_TYPE, _AP_WI, _AP_SI) \ + REF_REL_OP_WITH_INT(<, C_TYPE, _AP_WI, _AP_SI) \ + REF_REL_OP_WITH_INT(>=, C_TYPE, _AP_WI, _AP_SI) \ + REF_REL_OP_WITH_INT(<=, C_TYPE, _AP_WI, _AP_SI) + +REF_REL_WITH_INT_1(bool, 1, false) +REF_REL_WITH_INT_1(char, 8, CHAR_IS_SIGNED) +REF_REL_WITH_INT_1(signed char, 8, true) +REF_REL_WITH_INT_1(unsigned char, 8, false) +REF_REL_WITH_INT_1(short, _AP_SIZE_short, true) +REF_REL_WITH_INT_1(unsigned short, _AP_SIZE_short, false) +REF_REL_WITH_INT_1(int, _AP_SIZE_int, true) 
+REF_REL_WITH_INT_1(unsigned int, _AP_SIZE_int, false) +REF_REL_WITH_INT_1(long, _AP_SIZE_long, true) +REF_REL_WITH_INT_1(unsigned long, _AP_SIZE_long, false) +REF_REL_WITH_INT_1(ap_slong, _AP_SIZE_ap_slong, true) +REF_REL_WITH_INT_1(ap_ulong, _AP_SIZE_ap_slong, false) + +// Make the line shorter than 5000 chars +#define REF_REL_WITH_INT_2(C_TYPE, _AP_WI, _AP_SI) \ + REF_REL_OP_WITH_INT(==, C_TYPE, _AP_WI, _AP_SI) \ + REF_REL_OP_WITH_INT(!=, C_TYPE, _AP_WI, _AP_SI) + +REF_REL_WITH_INT_2(bool, 1, false) +REF_REL_WITH_INT_2(char, 8, CHAR_IS_SIGNED) +REF_REL_WITH_INT_2(signed char, 8, true) +REF_REL_WITH_INT_2(unsigned char, 8, false) +REF_REL_WITH_INT_2(short, _AP_SIZE_short, true) +REF_REL_WITH_INT_2(unsigned short, _AP_SIZE_short, false) +REF_REL_WITH_INT_2(int, _AP_SIZE_int, true) +REF_REL_WITH_INT_2(unsigned int, _AP_SIZE_int, false) +REF_REL_WITH_INT_2(long, _AP_SIZE_long, true) +REF_REL_WITH_INT_2(unsigned long, _AP_SIZE_long, false) +REF_REL_WITH_INT_2(ap_slong, _AP_SIZE_ap_slong, true) +REF_REL_WITH_INT_2(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef REF_REL_OP_WITH_INT +#undef REF_REL_WITH_INT_1 +#undef REF_REL_WITH_INT_2 + +#define REF_BIN_OP_WITH_INT(BIN_OP, RTYPE, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE typename ap_int_base<_AP_W, false>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(const ap_range_ref<_AP_W, _AP_S>& op, C_TYPE op2) { \ + return ap_int_base<_AP_W, false>(op) \ + BIN_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ + } \ + template \ + INLINE typename ap_int_base<_AP_W2, _AP_S2>::template RType<_AP_W, \ + false>::RTYPE \ + operator BIN_OP(C_TYPE op2, const ap_range_ref<_AP_W, _AP_S>& op) { \ + return ap_int_base<_AP_W2, _AP_S2>(op2) \ + BIN_OP ap_int_base<_AP_W, false>(op); \ + } + +// arithmetic operators. 
+#define REF_BIN_OP_WITH_INT_ARITH(C_TYPE, _AP_W2, _AP_S2) \ + REF_BIN_OP_WITH_INT(+, plus, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(-, minus, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(*, mult, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(/, div, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(%, mod, C_TYPE, (_AP_W2), (_AP_S2)) + +REF_BIN_OP_WITH_INT_ARITH(bool, 1, false) +REF_BIN_OP_WITH_INT_ARITH(char, 8, CHAR_IS_SIGNED) +REF_BIN_OP_WITH_INT_ARITH(signed char, 8, true) +REF_BIN_OP_WITH_INT_ARITH(unsigned char, 8, false) +REF_BIN_OP_WITH_INT_ARITH(short, _AP_SIZE_short, true) +REF_BIN_OP_WITH_INT_ARITH(unsigned short, _AP_SIZE_short, false) +REF_BIN_OP_WITH_INT_ARITH(int, _AP_SIZE_int, true) +REF_BIN_OP_WITH_INT_ARITH(unsigned int, _AP_SIZE_int, false) +REF_BIN_OP_WITH_INT_ARITH(long, _AP_SIZE_long, true) +REF_BIN_OP_WITH_INT_ARITH(unsigned long, _AP_SIZE_long, false) +REF_BIN_OP_WITH_INT_ARITH(ap_slong, _AP_SIZE_ap_slong, true) +REF_BIN_OP_WITH_INT_ARITH(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef REF_BIN_OP_WITH_INT_ARITH + +// bitwise and shift operators +#define REF_BIN_OP_WITH_INT_BITS(C_TYPE, _AP_W2, _AP_S2) \ + REF_BIN_OP_WITH_INT(&, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(|, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(^, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(>>, arg1, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_WITH_INT(<<, arg1, C_TYPE, (_AP_W2), (_AP_S2)) + +REF_BIN_OP_WITH_INT_BITS(bool, 1, false) +REF_BIN_OP_WITH_INT_BITS(char, 8, CHAR_IS_SIGNED) +REF_BIN_OP_WITH_INT_BITS(signed char, 8, true) +REF_BIN_OP_WITH_INT_BITS(unsigned char, 8, false) +REF_BIN_OP_WITH_INT_BITS(short, _AP_SIZE_short, true) +REF_BIN_OP_WITH_INT_BITS(unsigned short, _AP_SIZE_short, false) +REF_BIN_OP_WITH_INT_BITS(int, _AP_SIZE_int, true) +REF_BIN_OP_WITH_INT_BITS(unsigned int, _AP_SIZE_int, false) +REF_BIN_OP_WITH_INT_BITS(long, _AP_SIZE_long, true) +REF_BIN_OP_WITH_INT_BITS(unsigned long, 
_AP_SIZE_long, false) +REF_BIN_OP_WITH_INT_BITS(ap_slong, _AP_SIZE_ap_slong, true) +REF_BIN_OP_WITH_INT_BITS(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef REF_BIN_OP_WITH_INT_BITS + +/* ap_range_ref with ap_range_ref + * ------------------------------------------------------------ + */ +#define REF_BIN_OP(BIN_OP, RTYPE) \ + template \ + INLINE \ + typename ap_int_base<_AP_W, false>::template RType<_AP_W2, false>::RTYPE \ + operator BIN_OP(const ap_range_ref<_AP_W, _AP_S>& lhs, \ + const ap_range_ref<_AP_W2, _AP_S2>& rhs) { \ + return (lhs.operator ap_int_base<_AP_W, false>())BIN_OP( \ + rhs.operator ap_int_base<_AP_W2, false>()); \ + } + +REF_BIN_OP(+, plus) +REF_BIN_OP(-, minus) +REF_BIN_OP(*, mult) +REF_BIN_OP(/, div) +REF_BIN_OP(%, mod) +REF_BIN_OP(&, logic) +REF_BIN_OP(|, logic) +REF_BIN_OP(^, logic) +REF_BIN_OP(>>, arg1) +REF_BIN_OP(<<, arg1) + +/* ap_concat_ref with ap_concat_ref. + * ------------------------------------------------------------ + */ + +//************************************************************************ +// Implement +// ap_int_base = ap_concat_ref OP ap_concat_ref +// for operators +, -, *, /, %, >>, <<, &, |, ^ +// Without these operators the operands are converted to int64 and +// larger results lose informations (higher order bits). +// +// operand OP +// / | +// left-concat right-concat +// / | / | +// +// +// _AP_LW1, _AP_LT1 (width and type of left-concat's left side) +// _AP_LW2, _AP_LT2 (width and type of left-concat's right side) +// Similarly for RHS of operand OP: _AP_RW1, AP_RW2, _AP_RT1, _AP_RT2 +// +// In Verilog 2001 result of concatenation is always unsigned even +// when both sides are signed. 
+//************************************************************************ + +#undef SYN_CONCAT_REF_BIN_OP + +#define SYN_CONCAT_REF_BIN_OP(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_int_base<_AP_LW1 + _AP_LW2, false>::template RType< \ + _AP_RW1 + _AP_RW2, false>::RTYPE \ + operator BIN_OP( \ + const ap_concat_ref<_AP_LW1, _AP_LT1, _AP_LW2, _AP_LT2>& lhs, \ + const ap_concat_ref<_AP_RW1, _AP_RT1, _AP_RW2, _AP_RT2>& rhs) { \ + return lhs.get() BIN_OP rhs.get(); \ + } + +SYN_CONCAT_REF_BIN_OP(+, plus) +SYN_CONCAT_REF_BIN_OP(-, minus) +SYN_CONCAT_REF_BIN_OP(*, mult) +SYN_CONCAT_REF_BIN_OP(/, div) +SYN_CONCAT_REF_BIN_OP(%, mod) +SYN_CONCAT_REF_BIN_OP(&, logic) +SYN_CONCAT_REF_BIN_OP(|, logic) +SYN_CONCAT_REF_BIN_OP(^, logic) +SYN_CONCAT_REF_BIN_OP(>>, arg1) +SYN_CONCAT_REF_BIN_OP(<<, arg1) + +#undef SYN_CONCAT_REF_BIN_OP + +#define CONCAT_OP_WITH_INT(C_TYPE, _AP_WI, _AP_SI) \ + template \ + INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ + const ap_int_base<_AP_W, _AP_S> &op1, C_TYPE op2) { \ + ap_int_base<_AP_WI + _AP_W, false> val(op2); \ + ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ + ret <<= _AP_WI; \ + if (_AP_SI) { \ + val <<= _AP_W; \ + val >>= _AP_W; \ + } \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ + C_TYPE op1, const ap_int_base<_AP_W, _AP_S> &op2) { \ + ap_int_base<_AP_WI + _AP_W, false> val(op1); \ + ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ + if (_AP_S) { \ + ret <<= _AP_WI; \ + ret >>= _AP_WI; \ + } \ + ret |= val << _AP_W; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ + const ap_range_ref<_AP_W, _AP_S> &op1, C_TYPE op2) { \ + ap_int_base<_AP_WI + _AP_W, false> val(op2); \ + ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ + ret <<= _AP_WI; \ + if (_AP_SI) { \ + val <<= _AP_W; \ + val >>= _AP_W; \ + } \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ + C_TYPE 
op1, const ap_range_ref<_AP_W, _AP_S> &op2) { \ + ap_int_base<_AP_WI + _AP_W, false> val(op1); \ + ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ + int len = op2.length(); \ + val <<= len; \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_WI + 1, false> operator,( \ + const ap_bit_ref<_AP_W, _AP_S> &op1, C_TYPE op2) { \ + ap_int_base<_AP_WI + 1, false> val(op2); \ + val[_AP_WI] = op1; \ + return val; \ + } \ + template \ + INLINE ap_int_base<_AP_WI + 1, false> operator,( \ + C_TYPE op1, const ap_bit_ref<_AP_W, _AP_S> &op2) { \ + ap_int_base<_AP_WI + 1, false> val(op1); \ + val <<= 1; \ + val[0] = op2; \ + return val; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_W2 + _AP_WI, false> operator,( \ + const ap_concat_ref<_AP_W, _AP_T, _AP_W2, _AP_T2> &op1, C_TYPE op2) { \ + ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> val(op2); \ + ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> ret(op1); \ + if (_AP_SI) { \ + val <<= _AP_W + _AP_W2; \ + val >>= _AP_W + _AP_W2; \ + } \ + ret <<= _AP_WI; \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_W2 + _AP_WI, false> operator,( \ + C_TYPE op1, const ap_concat_ref<_AP_W, _AP_T, _AP_W2, _AP_T2> &op2) { \ + ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> val(op1); \ + ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> ret(op2); \ + int len = op2.length(); \ + val <<= len; \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op1, \ + C_TYPE op2) { \ + ap_int_base<_AP_WI + _AP_W, false> val(op2); \ + ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ + if (_AP_SI) { \ + val <<= _AP_W; \ + val >>= _AP_W; \ + } \ + ret <<= _AP_WI; \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ + C_TYPE op1, \ + const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op2) { \ + ap_int_base<_AP_WI + _AP_W, false> 
val(op1); \ + ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ + int len = op2.length(); \ + val <<= len; \ + ret |= val; \ + return ret; \ + } \ + template \ + INLINE ap_int_base<1 + _AP_WI, false> operator,( \ + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op1, \ + C_TYPE op2) { \ + ap_int_base<_AP_WI + 1, _AP_SI> val(op2); \ + val[_AP_WI] = op1; \ + return val; \ + } \ + template \ + INLINE ap_int_base<1 + _AP_WI, false> operator,( \ + C_TYPE op1, \ + const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op2) { \ + ap_int_base<_AP_WI + 1, _AP_SI> val(op1); \ + val <<= 1; \ + val[0] = op2; \ + return val; \ + } + +CONCAT_OP_WITH_INT(bool, 1, false) +CONCAT_OP_WITH_INT(char, 8, CHAR_IS_SIGNED) +CONCAT_OP_WITH_INT(signed char, 8, true) +CONCAT_OP_WITH_INT(unsigned char, 8, false) +CONCAT_OP_WITH_INT(short, _AP_SIZE_short, true) +CONCAT_OP_WITH_INT(unsigned short, _AP_SIZE_short, false) +CONCAT_OP_WITH_INT(int, _AP_SIZE_int, true) +CONCAT_OP_WITH_INT(unsigned int, _AP_SIZE_int, false) +CONCAT_OP_WITH_INT(long, _AP_SIZE_long, true) +CONCAT_OP_WITH_INT(unsigned long, _AP_SIZE_long, false) +CONCAT_OP_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) +CONCAT_OP_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) + +#undef CONCAT_OP_WITH_INT + +#define CONCAT_SHIFT_WITH_INT(C_TYPE, OP) \ + template \ + INLINE ap_uint<_AP_W + _AP_W1> operator OP( \ + const ap_concat_ref<_AP_W, _AP_T, _AP_W1, _AP_T1> lhs, C_TYPE rhs) { \ + return ap_uint<_AP_W + _AP_W1>(lhs).get() OP int(rhs); \ + } + +// FIXME int(rhs) may loose precision. 
+ +CONCAT_SHIFT_WITH_INT(int, <<) +CONCAT_SHIFT_WITH_INT(unsigned int, <<) +CONCAT_SHIFT_WITH_INT(long, <<) +CONCAT_SHIFT_WITH_INT(unsigned long, <<) +CONCAT_SHIFT_WITH_INT(ap_slong, <<) +CONCAT_SHIFT_WITH_INT(ap_ulong, <<) + +CONCAT_SHIFT_WITH_INT(int, >>) +CONCAT_SHIFT_WITH_INT(unsigned int, >>) +CONCAT_SHIFT_WITH_INT(long, >>) +CONCAT_SHIFT_WITH_INT(unsigned long, >>) +CONCAT_SHIFT_WITH_INT(ap_slong, >>) +CONCAT_SHIFT_WITH_INT(ap_ulong, >>) + +#endif // ifndef __cplusplus +#endif // ifndef __AP_INT_REF_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int_special.h b/TrigScint/include/TrigScint/ap_int_special.h new file mode 100644 index 000000000..3afc6192b --- /dev/null +++ b/TrigScint/include/TrigScint/ap_int_special.h @@ -0,0 +1,223 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_INT_SPECIAL_H__ +#define __AP_INT_SPECIAL_H__ + +#ifndef __AP_INT_H__ +#error "Only ap_fixed.h and ap_int.h can be included directly in user code." +#endif + +#ifndef __SYNTHESIS__ +#include +#include +#endif +// FIXME AP_AUTOCC cannot handle many standard headers, so declare instead of +// include. +// #include +namespace std { +template class complex; +} + +/* + TODO: Modernize the code using C++11/C++14 + 1. constexpr http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0415r0.html + 2. move constructor +*/ + +namespace std { +/* + Specialize std::complex to zero initialization ap_int. 
+ + To reduce the area cost, ap_int is not zero initialized, just like basic + types float or double. However, libstdc++ provides specialization for float, + double and long double, initializing image part to 0 when not specified. + + This has become a difficulty in switching legacy code from these C types to + ap_int. To ease the tranform of legacy code, we have to implement + specialization of std::complex<> for our type. + + As ap_int is a template, it is impossible to specialize only the methods + that causes default initialization of value type in std::complex<>. An + explicit full specialization of the template class has to be done, covering + all the member functions and operators of std::complex<> as specified + in standard 26.2.4 and 26.2.5. +*/ +template +class complex > { + public: + typedef ap_int<_AP_W> _Tp; + typedef _Tp value_type; + + // 26.2.4/1 + // Constructor without argument + // Default initialize, so that in dataflow, the variable is only written once. + complex() : _M_real(_Tp()), _M_imag(_Tp()) {} + // Constructor with ap_int. + // Zero initialize image part when not specified, so that `C(1) == C(1,0)` + complex(const _Tp &__r, const _Tp &__i = _Tp(0)) + : _M_real(__r), _M_imag(__i) {} + + // Constructor with another complex number + template + complex(const complex<_Up> &__z) : _M_real(__z.real()), _M_imag(__z.imag()) {} + +#if __cplusplus >= 201103L + const _Tp& real() const { return _M_real; } + const _Tp& imag() const { return _M_imag; } +#else + _Tp& real() { return _M_real; } + const _Tp& real() const { return _M_real; } + _Tp& imag() { return _M_imag; } + const _Tp& imag() const { return _M_imag; } +#endif + + void real(_Tp __val) { _M_real = __val; } + + void imag(_Tp __val) { _M_imag = __val; } + + // Assign this complex number with ap_int. 
+ // Zero initialize image poarrt, so that `C c; c = 1; c == C(1,0);` + complex<_Tp> &operator=(const _Tp __t) { + _M_real = __t; + _M_imag = _Tp(0); + return *this; + } + + // 26.2.5/1 + // Add ap_int to this complex number. + complex<_Tp> &operator+=(const _Tp &__t) { + _M_real += __t; + return *this; + } + + // 26.2.5/3 + // Subtract ap_int from this complex number. + complex<_Tp> &operator-=(const _Tp &__t) { + _M_real -= __t; + return *this; + } + + // 26.2.5/5 + // Multiply this complex number by ap_int. + complex<_Tp> &operator*=(const _Tp &__t) { + _M_real *= __t; + _M_imag *= __t; + return *this; + } + + // 26.2.5/7 + // Divide this complex number by ap_int. + complex<_Tp> &operator/=(const _Tp &__t) { + _M_real /= __t; + _M_imag /= __t; + return *this; + } + + // Assign complex number to this complex number. + template + complex<_Tp> &operator=(const complex<_Up> &__z) { + _M_real = __z.real(); + _M_imag = __z.imag(); + return *this; + } + + // 26.2.5/9 + // Add complex number to this. + template + complex<_Tp> &operator+=(const complex<_Up> &__z) { + _M_real += __z.real(); + _M_imag += __z.imag(); + return *this; + } + + // 26.2.5/11 + // Subtract complex number from this. + template + complex<_Tp> &operator-=(const complex<_Up> &__z) { + _M_real -= __z.real(); + _M_imag -= __z.imag(); + return *this; + } + + // 26.2.5/13 + // Multiply this by complex number. + template + complex<_Tp> &operator*=(const complex<_Up> &__z) { + const _Tp __r = _M_real * __z.real() - _M_imag * __z.imag(); + _M_imag = _M_real * __z.imag() + _M_imag * __z.real(); + _M_real = __r; + return *this; + } + + // 26.2.5/15 + // Divide this by complex number. 
+ template + complex<_Tp> &operator/=(const complex<_Up> &__z) { + complex<_Tp> cj (__z.real(), -__z.imag()); + complex<_Tp> a = (*this) * cj; + complex<_Tp> b = cj * __z; + _M_real = a.real() / b.real(); + _M_imag = a.imag() / b.real(); + return *this; + } + + private: + _Tp _M_real; + _Tp _M_imag; + +}; // class complex > + + +/* + Non-member operations + These operations are not required by standard in 26.2.6, but libstdc++ + defines them for + float, double or long double's specialization. +*/ +// Compare complex number with ap_int. +template +inline bool operator==(const complex > &__x, const ap_int<_AP_W> &__y) { + return __x.real() == __y && + __x.imag() == 0; +} + +// Compare ap_int with complex number. +template +inline bool operator==(const ap_int<_AP_W> &__x, const complex > &__y) { + return __x == __y.real() && + 0 == __y.imag(); +} + +// Compare complex number with ap_int. +template +inline bool operator!=(const complex > &__x, const ap_int<_AP_W> &__y) { + return __x.real() != __y || + __x.imag() != 0; +} + +// Compare ap_int with complex number. 
+template +inline bool operator!=(const ap_int<_AP_W> &__x, const complex > &__y) { + return __x != __y.real() || + 0 != __y.imag(); +} + +} // namespace std + +#endif // ifndef __AP_INT_SPECIAL_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/clusterproducer.h b/TrigScint/include/TrigScint/clusterproducer.h new file mode 100755 index 000000000..75333d8a1 --- /dev/null +++ b/TrigScint/include/TrigScint/clusterproducer.h @@ -0,0 +1,12 @@ +#ifndef CLUSTERPRODUCER_H +#define CLUSTERPRODUCER_H + +#include "objdef.h" + +void copyHit1(Hit One, Hit Two); +void copyHit2(Hit One, Hit Two); +void clusterproducer_ref(Hit inHit[NHITS],Cluster outClus[NCLUS]); +Cluster* clusterproducer_sw(Hit inHit[NHITS]); +void clusterproducer_hw(Hit inHit[NHITS],Cluster outClus[NCLUS]); + +#endif diff --git a/TrigScint/include/TrigScint/etc/ap_private.h b/TrigScint/include/TrigScint/etc/ap_private.h new file mode 100644 index 000000000..0c29a0ac1 --- /dev/null +++ b/TrigScint/include/TrigScint/etc/ap_private.h @@ -0,0 +1,7199 @@ +/* + * Copyright 2011-2019 Xilinx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AP_PRIVATE_H__ +#define __AP_PRIVATE_H__ + +// common macros and type declarations are now defined in ap_common.h, and +// ap_private becomes part of it. +#ifndef __AP_COMMON_H__ +#error "etc/ap_private.h cannot be included directly." 
+#endif + +// forward declarations +//template +//class ap_private; // moved to ap_common.h +template +struct _private_range_ref; +template +struct _private_bit_ref; + +// TODO clean up this part. +#ifndef LLVM_SUPPORT_MATHEXTRAS_H +#define LLVM_SUPPORT_MATHEXTRAS_H + +#ifdef _MSC_VER +#if _MSC_VER <= 1500 +typedef __int8 int8_t; +typedef unsigned __int8 uint8_t; +typedef __int16 int16_t; +typedef unsigned __int16 uint16_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +#else +#include +#endif +#else +#include +#endif + +#ifndef INLINE +#define INLINE inline +// Enable to debug ap_int/ap_fixed +// #define INLINE __attribute__((weak)) +#endif + +// NOTE: The following support functions use the _32/_64 extensions instead of +// type overloading so that signed and unsigned integers can be used without +// ambiguity. +namespace AESL_std { +template +DataType INLINE min(DataType a, DataType b) { + return (a >= b) ? b : a; +} + +template +DataType INLINE max(DataType a, DataType b) { + return (a >= b) ? a : b; +} +} // namespace AESL_std + +// TODO clean up included headers. +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ap_private_ops { +/// Hi_32 - This function returns the high 32 bits of a 64 bit value. +static INLINE uint32_t Hi_32(uint64_t Value) { + return static_cast(Value >> 32); +} + +/// Lo_32 - This function returns the low 32 bits of a 64 bit value. 
+static INLINE uint32_t Lo_32(uint64_t Value) { + return static_cast(Value); +} + +template +INLINE bool isNegative(const ap_private<_AP_W, false>& a) { + return false; +} + +template +INLINE bool isNegative(const ap_private<_AP_W, true>& a) { + enum { + APINT_BITS_PER_WORD = 64, + _AP_N = (_AP_W + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD + }; + static const uint64_t sign_mask = 1ULL << ((_AP_W - 1) % APINT_BITS_PER_WORD); + return (sign_mask & a.get_pVal(_AP_N - 1)) != 0; +} + +/// CountLeadingZeros_32 - this function performs the platform optimal form of +/// counting the number of zeros from the most significant bit to the first one +/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8. +/// Returns 32 if the word is zero. +static INLINE unsigned CountLeadingZeros_32(uint32_t Value) { + unsigned Count; // result +#if __GNUC__ >= 4 +// PowerPC is defined for __builtin_clz(0) +#if !defined(__ppc__) && !defined(__ppc64__) + if (Value == 0) return 32; +#endif + Count = __builtin_clz(Value); +#else + if (Value == 0) return 32; + Count = 0; + // bisecton method for count leading zeros + for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) { + uint32_t Tmp = (Value) >> (Shift); + if (Tmp) { + Value = Tmp; + } else { + Count |= Shift; + } + } +#endif + return Count; +} + +/// CountLeadingZeros_64 - This function performs the platform optimal form +/// of counting the number of zeros from the most significant bit to the first +/// one bit (64 bit edition.) +/// Returns 64 if the word is zero. 
+static INLINE unsigned CountLeadingZeros_64(uint64_t Value) { + unsigned Count; // result +#if __GNUC__ >= 4 +// PowerPC is defined for __builtin_clzll(0) +#if !defined(__ppc__) && !defined(__ppc64__) + if (!Value) return 64; +#endif + Count = __builtin_clzll(Value); +#else + if (sizeof(long) == sizeof(int64_t)) { + if (!Value) return 64; + Count = 0; + // bisecton method for count leading zeros + for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) { + uint64_t Tmp = (Value) >> (Shift); + if (Tmp) { + Value = Tmp; + } else { + Count |= Shift; + } + } + } else { + // get hi portion + uint32_t Hi = Hi_32(Value); + + // if some bits in hi portion + if (Hi) { + // leading zeros in hi portion plus all bits in lo portion + Count = CountLeadingZeros_32(Hi); + } else { + // get lo portion + uint32_t Lo = Lo_32(Value); + // same as 32 bit value + Count = CountLeadingZeros_32(Lo) + 32; + } + } +#endif + return Count; +} + +/// CountTrailingZeros_64 - This function performs the platform optimal form +/// of counting the number of zeros from the least significant bit to the first +/// one bit (64 bit edition.) +/// Returns 64 if the word is zero. +static INLINE unsigned CountTrailingZeros_64(uint64_t Value) { +#if __GNUC__ >= 4 + return (Value != 0) ? __builtin_ctzll(Value) : 64; +#else + static const unsigned Mod67Position[] = { + 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54, 4, + 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55, 47, 5, 32, + 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27, 29, 50, 43, 46, 31, + 37, 21, 57, 52, 8, 26, 49, 45, 36, 56, 7, 48, 35, 6, 34, 33, 0}; + return Mod67Position[(uint64_t)(-(int64_t)Value & (int64_t)Value) % 67]; +#endif +} + +/// CountPopulation_64 - this function counts the number of set bits in a value, +/// (64 bit edition.) 
+static INLINE unsigned CountPopulation_64(uint64_t Value) { +#if __GNUC__ >= 4 + return __builtin_popcountll(Value); +#else + uint64_t v = Value - (((Value) >> 1) & 0x5555555555555555ULL); + v = (v & 0x3333333333333333ULL) + (((v) >> 2) & 0x3333333333333333ULL); + v = (v + ((v) >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); +#endif +} + +static INLINE uint32_t countLeadingOnes_64(uint64_t __V, uint32_t skip) { + uint32_t Count = 0; + if (skip) (__V) <<= (skip); + while (__V && (__V & (1ULL << 63))) { + Count++; + (__V) <<= 1; + } + return Count; +} + +static INLINE std::string oct2Bin(char oct) { + switch (oct) { + case '\0': { + return ""; + } + case '.': { + return "."; + } + case '0': { + return "000"; + } + case '1': { + return "001"; + } + case '2': { + return "010"; + } + case '3': { + return "011"; + } + case '4': { + return "100"; + } + case '5': { + return "101"; + } + case '6': { + return "110"; + } + case '7': { + return "111"; + } + } + assert(0 && "Invalid character in digit string"); + return ""; +} + +static INLINE std::string hex2Bin(char hex) { + switch (hex) { + case '\0': { + return ""; + } + case '.': { + return "."; + } + case '0': { + return "0000"; + } + case '1': { + return "0001"; + } + case '2': { + return "0010"; + } + case '3': { + return "0011"; + } + case '4': { + return "0100"; + } + case '5': { + return "0101"; + } + case '6': { + return "0110"; + } + case '7': { + return "0111"; + } + case '8': { + return "1000"; + } + case '9': { + return "1001"; + } + case 'A': + case 'a': { + return "1010"; + } + case 'B': + case 'b': { + return "1011"; + } + case 'C': + case 'c': { + return "1100"; + } + case 'D': + case 'd': { + return "1101"; + } + case 'E': + case 'e': { + return "1110"; + } + case 'F': + case 'f': { + return "1111"; + } + } + assert(0 && "Invalid character in digit string"); + return ""; +} + +static INLINE uint32_t decode_digit(char cdigit, int radix) { + uint32_t digit = 0; 
+ if (radix == 16) { +#define isxdigit(c) \ + (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || \ + ((c) >= 'A' && (c) <= 'F')) +#define isdigit(c) ((c) >= '0' && (c) <= '9') + if (!isxdigit(cdigit)) assert(0 && "Invalid hex digit in string"); + if (isdigit(cdigit)) + digit = cdigit - '0'; + else if (cdigit >= 'a') + digit = cdigit - 'a' + 10; + else if (cdigit >= 'A') + digit = cdigit - 'A' + 10; + else + assert(0 && "huh? we shouldn't get here"); + } else if (isdigit(cdigit)) { + digit = cdigit - '0'; + } else { + assert(0 && "Invalid character in digit string"); + } +#undef isxdigit +#undef isdigit + return digit; +} + +// Determine the radix of "val". +static INLINE std::string parseString(const std::string& input, unsigned char& radix) { + size_t len = input.length(); + if (len == 0) { + if (radix == 0) radix = 10; + return input; + } + + size_t startPos = 0; + // Trim whitespace + while (input[startPos] == ' ' && startPos < len) startPos++; + while (input[len - 1] == ' ' && startPos < len) len--; + + std::string val = input.substr(startPos, len - startPos); + // std::cout << "val = " << val << "\n"; + len = val.length(); + startPos = 0; + + // If the length of the string is less than 2, then radix + // is decimal and there is no exponent. + if (len < 2) { + if (radix == 0) radix = 10; + return val; + } + + bool isNegative = false; + std::string ans; + + // First check to see if we start with a sign indicator + if (val[0] == '-') { + ans = "-"; + ++startPos; + isNegative = true; + } else if (val[0] == '+') + ++startPos; + + if (len - startPos < 2) { + if (radix == 0) radix = 10; + return val; + } + + if (val.substr(startPos, 2) == "0x" || val.substr(startPos, 2) == "0X") { + // If we start with "0x", then the radix is hex. + radix = 16; + startPos += 2; + } else if (val.substr(startPos, 2) == "0b" || + val.substr(startPos, 2) == "0B") { + // If we start with "0b", then the radix is binary. 
+ radix = 2; + startPos += 2; + } else if (val.substr(startPos, 2) == "0o" || + val.substr(startPos, 2) == "0O") { + // If we start with "0o", then the radix is octal. + radix = 8; + startPos += 2; + } else if (radix == 0) { + radix = 10; + } + + int exp = 0; + if (radix == 10) { + // If radix is decimal, then see if there is an + // exponent indicator. + size_t expPos = val.find('e'); + bool has_exponent = true; + if (expPos == std::string::npos) expPos = val.find('E'); + if (expPos == std::string::npos) { + // No exponent indicator, so the mantissa goes to the end. + expPos = len; + has_exponent = false; + } + // std::cout << "startPos = " << startPos << " " << expPos << "\n"; + + ans += val.substr(startPos, expPos - startPos); + if (has_exponent) { + // Parse the exponent. + std::istringstream iss(val.substr(expPos + 1, len - expPos - 1)); + iss >> exp; + } + } else { + // Check for a binary exponent indicator. + size_t expPos = val.find('p'); + bool has_exponent = true; + if (expPos == std::string::npos) expPos = val.find('P'); + if (expPos == std::string::npos) { + // No exponent indicator, so the mantissa goes to the end. + expPos = len; + has_exponent = false; + } + + // std::cout << "startPos = " << startPos << " " << expPos << "\n"; + + assert(startPos <= expPos); + // Convert to binary as we go. + for (size_t i = startPos; i < expPos; ++i) { + if (radix == 16) { + ans += hex2Bin(val[i]); + } else if (radix == 8) { + ans += oct2Bin(val[i]); + } else { // radix == 2 + ans += val[i]; + } + } + // End in binary + radix = 2; + if (has_exponent) { + // Parse the exponent. 
+ std::istringstream iss(val.substr(expPos + 1, len - expPos - 1)); + iss >> exp; + } + } + if (exp == 0) return ans; + + size_t decPos = ans.find('.'); + if (decPos == std::string::npos) decPos = ans.length(); + if ((int)decPos + exp >= (int)ans.length()) { + int i = decPos; + for (; i < (int)ans.length() - 1; ++i) ans[i] = ans[i + 1]; + for (; i < (int)ans.length(); ++i) ans[i] = '0'; + for (; i < (int)decPos + exp; ++i) ans += '0'; + return ans; + } else if ((int)decPos + exp < (int)isNegative) { + std::string dupAns = "0."; + if (ans[0] == '-') dupAns = "-0."; + for (int i = 0; i < isNegative - (int)decPos - exp; ++i) dupAns += '0'; + for (size_t i = isNegative; i < ans.length(); ++i) + if (ans[i] != '.') dupAns += ans[i]; + return dupAns; + } + + if (exp > 0) + for (size_t i = decPos; i < decPos + exp; ++i) ans[i] = ans[i + 1]; + else { + if (decPos == ans.length()) ans += ' '; + for (int i = decPos; i > (int)decPos + exp; --i) ans[i] = ans[i - 1]; + } + ans[decPos + exp] = '.'; + return ans; +} + +/// sub_1 - This function subtracts a single "digit" (64-bit word), y, from +/// the multi-digit integer array, x[], propagating the borrowed 1 value until +/// no further borrowing is neeeded or it runs out of "digits" in x. The result +/// is 1 if "borrowing" exhausted the digits in x, or 0 if x was not exhausted. +/// In other words, if y > x then this function returns 1, otherwise 0. +/// @returns the borrow out of the subtraction +static INLINE bool sub_1(uint64_t x[], uint32_t len, uint64_t y) { + for (uint32_t i = 0; i < len; ++i) { + uint64_t __X = x[i]; + x[i] -= y; + if (y > __X) + y = 1; // We have to "borrow 1" from next "digit" + else { + y = 0; // No need to borrow + break; // Remaining digits are unchanged so exit early + } + } + return (y != 0); +} + +/// add_1 - This function adds a single "digit" integer, y, to the multiple +/// "digit" integer array, x[]. 
x[] is modified to reflect the addition and +/// 1 is returned if there is a carry out, otherwise 0 is returned. +/// @returns the carry of the addition. +static INLINE bool add_1(uint64_t dest[], uint64_t x[], uint32_t len, + uint64_t y) { + for (uint32_t i = 0; i < len; ++i) { + dest[i] = y + x[i]; + if (dest[i] < y) + y = 1; // Carry one to next digit. + else { + y = 0; // No need to carry so exit early + break; + } + } + return (y != 0); +} + +/// add - This function adds the integer array x to the integer array Y and +/// places the result in dest. +/// @returns the carry out from the addition +/// @brief General addition of 64-bit integer arrays +static INLINE bool add(uint64_t* dest, const uint64_t* x, const uint64_t* y, + uint32_t destlen, uint32_t xlen, uint32_t ylen, + bool xsigned, bool ysigned) { + bool carry = false; + uint32_t len = AESL_std::min(xlen, ylen); + uint32_t i; + for (i = 0; i < len && i < destlen; ++i) { + uint64_t limit = + AESL_std::min(x[i], y[i]); // must come first in case dest == x + dest[i] = x[i] + y[i] + carry; + carry = dest[i] < limit || (carry && dest[i] == limit); + } + if (xlen > ylen) { + const uint64_t yext = ysigned && int64_t(y[ylen - 1]) < 0 ? -1 : 0; + for (i = ylen; i < xlen && i < destlen; i++) { + uint64_t limit = AESL_std::min(x[i], yext); + dest[i] = x[i] + yext + carry; + carry = (dest[i] < limit) || (carry && dest[i] == limit); + } + } else if (ylen > xlen) { + const uint64_t xext = xsigned && int64_t(x[xlen - 1]) < 0 ? -1 : 0; + for (i = xlen; i < ylen && i < destlen; i++) { + uint64_t limit = AESL_std::min(xext, y[i]); + dest[i] = xext + y[i] + carry; + carry = (dest[i] < limit) || (carry && dest[i] == limit); + } + } + return carry; +} + +/// @returns returns the borrow out. +/// @brief Generalized subtraction of 64-bit integer arrays. 
+static INLINE bool sub(uint64_t* dest, const uint64_t* x, const uint64_t* y, + uint32_t destlen, uint32_t xlen, uint32_t ylen, + bool xsigned, bool ysigned) { + bool borrow = false; + uint32_t i; + uint32_t len = AESL_std::min(xlen, ylen); + for (i = 0; i < len && i < destlen; ++i) { + uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; + borrow = y[i] > x_tmp || (borrow && x[i] == 0); + dest[i] = x_tmp - y[i]; + } + if (xlen > ylen) { + const uint64_t yext = ysigned && int64_t(y[ylen - 1]) < 0 ? -1 : 0; + for (i = ylen; i < xlen && i < destlen; i++) { + uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; + borrow = yext > x_tmp || (borrow && x[i] == 0); + dest[i] = x_tmp - yext; + } + } else if (ylen > xlen) { + const uint64_t xext = xsigned && int64_t(x[xlen - 1]) < 0 ? -1 : 0; + for (i = xlen; i < ylen && i < destlen; i++) { + uint64_t x_tmp = borrow ? xext - 1 : xext; + borrow = y[i] > x_tmp || (borrow && xext == 0); + dest[i] = x_tmp - y[i]; + } + } + return borrow; +} + +/// Subtracts the RHS ap_private from this ap_private +/// @returns this, after subtraction +/// @brief Subtraction assignment operator. + +/// Multiplies an integer array, x by a a uint64_t integer and places the result +/// into dest. +/// @returns the carry out of the multiplication. +/// @brief Multiply a multi-digit ap_private by a single digit (64-bit) integer. +static INLINE uint64_t mul_1(uint64_t dest[], const uint64_t x[], uint32_t len, + uint64_t y) { + // Split y into high 32-bit part (hy) and low 32-bit part (ly) + uint64_t ly = y & 0xffffffffULL, hy = (y) >> 32; + uint64_t carry = 0; + static const uint64_t two_power_32 = 1ULL << 32; + // For each digit of x. + for (uint32_t i = 0; i < len; ++i) { + // Split x into high and low words + uint64_t lx = x[i] & 0xffffffffULL; + uint64_t hx = (x[i]) >> 32; + // hasCarry - A flag to indicate if there is a carry to the next digit. + // hasCarry == 0, no carry + // hasCarry == 1, has carry + // hasCarry == 2, no carry and the calculation result == 0. 
+ uint8_t hasCarry = 0; + dest[i] = carry + lx * ly; + // Determine if the add above introduces carry. + hasCarry = (dest[i] < carry) ? 1 : 0; + carry = hx * ly + ((dest[i]) >> 32) + (hasCarry ? two_power_32 : 0); + // The upper limit of carry can be (2^32 - 1)(2^32 - 1) + + // (2^32 - 1) + 2^32 = 2^64. + hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0); + + carry += (lx * hy) & 0xffffffffULL; + dest[i] = ((carry) << 32) | (dest[i] & 0xffffffffULL); + carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? two_power_32 : 0) + + ((carry) >> 32) + ((lx * hy) >> 32) + hx * hy; + } + return carry; +} + +/// Multiplies integer array x by integer array y and stores the result into +/// the integer array dest. Note that dest's size must be >= xlen + ylen in +/// order to +/// do a full precision computation. If it is not, then only the low-order words +/// are returned. +/// @brief Generalized multiplicate of integer arrays. +static INLINE void mul(uint64_t dest[], const uint64_t x[], uint32_t xlen, + const uint64_t y[], uint32_t ylen, uint32_t destlen) { + assert(xlen > 0); + assert(ylen > 0); + assert(destlen >= xlen + ylen); + if (xlen < destlen) dest[xlen] = mul_1(dest, x, xlen, y[0]); + for (uint32_t i = 1; i < ylen; ++i) { + uint64_t ly = y[i] & 0xffffffffULL, hy = (y[i]) >> 32; + uint64_t carry = 0, lx = 0, hx = 0; + for (uint32_t j = 0; j < xlen; ++j) { + lx = x[j] & 0xffffffffULL; + hx = (x[j]) >> 32; + // hasCarry - A flag to indicate if has carry. + // hasCarry == 0, no carry + // hasCarry == 1, has carry + // hasCarry == 2, no carry and the calculation result == 0. + uint8_t hasCarry = 0; + uint64_t resul = carry + lx * ly; + hasCarry = (resul < carry) ? 1 : 0; + carry = (hasCarry ? (1ULL << 32) : 0) + hx * ly + ((resul) >> 32); + hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 
2 : 0); + carry += (lx * hy) & 0xffffffffULL; + resul = ((carry) << 32) | (resul & 0xffffffffULL); + if (i + j < destlen) dest[i + j] += resul; + carry = + (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0) + + ((carry) >> 32) + (dest[i + j] < resul ? 1 : 0) + ((lx * hy) >> 32) + + hx * hy; + } + if (i + xlen < destlen) dest[i + xlen] = carry; + } +} + +/// Implementation of Knuth's Algorithm D (Division of nonnegative integers) +/// from "Art of Computer Programming, Volume 2", section 4.3.1, p. 272. The +/// variables here have the same names as in the algorithm. Comments explain +/// the algorithm and any deviation from it. +static INLINE void KnuthDiv(uint32_t* u, uint32_t* v, uint32_t* q, uint32_t* r, + uint32_t m, uint32_t n) { + assert(u && "Must provide dividend"); + assert(v && "Must provide divisor"); + assert(q && "Must provide quotient"); + assert(u != v && u != q && v != q && "Must us different memory"); + assert(n > 1 && "n must be > 1"); + + // Knuth uses the value b as the base of the number system. In our case b + // is 2^31 so we just set it to -1u. + uint64_t b = uint64_t(1) << 32; + + // DEBUG(cerr << "KnuthDiv: m=" << m << " n=" << n << '\n'); + // DEBUG(cerr << "KnuthDiv: original:"); + // DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << std::setbase(16) << + // u[i]); + // DEBUG(cerr << " by"); + // DEBUG(for (int i = n; i >0; i--) cerr << " " << std::setbase(16) << + // v[i-1]); + // DEBUG(cerr << '\n'); + // D1. [Normalize.] Set d = b / (v[n-1] + 1) and multiply all the digits of + // u and v by d. Note that we have taken Knuth's advice here to use a power + // of 2 value for d such that d * v[n-1] >= b/2 (b is the base). A power of + // 2 allows us to shift instead of multiply and it is easy to determine the + // shift amount from the leading zeros. We are basically normalizing the u + // and v so that its high bits are shifted to the top of v's range without + // overflow. 
Note that this can require an extra word in u so that u must + // be of length m+n+1. + uint32_t shift = CountLeadingZeros_32(v[n - 1]); + uint32_t v_carry = 0; + uint32_t u_carry = 0; + if (shift) { + for (uint32_t i = 0; i < m + n; ++i) { + uint32_t u_tmp = (u[i]) >> (32 - shift); + u[i] = ((u[i]) << (shift)) | u_carry; + u_carry = u_tmp; + } + for (uint32_t i = 0; i < n; ++i) { + uint32_t v_tmp = (v[i]) >> (32 - shift); + v[i] = ((v[i]) << (shift)) | v_carry; + v_carry = v_tmp; + } + } + u[m + n] = u_carry; + // DEBUG(cerr << "KnuthDiv: normal:"); + // DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << std::setbase(16) << + // u[i]); + // DEBUG(cerr << " by"); + // DEBUG(for (int i = n; i >0; i--) cerr << " " << std::setbase(16) << + // v[i-1]); + // DEBUG(cerr << '\n'); + + // D2. [Initialize j.] Set j to m. This is the loop counter over the places. + int j = m; + do { + // DEBUG(cerr << "KnuthDiv: quotient digit #" << j << '\n'); + // D3. [Calculate q'.]. + // Set qp = (u[j+n]*b + u[j+n-1]) / v[n-1]. (qp=qprime=q') + // Set rp = (u[j+n]*b + u[j+n-1]) % v[n-1]. (rp=rprime=r') + // Now test if qp == b or qp*v[n-2] > b*rp + u[j+n-2]; if so, decrease + // qp by 1, inrease rp by v[n-1], and repeat this test if rp < b. The test + // on v[n-2] determines at high speed most of the cases in which the trial + // value qp is one too large, and it eliminates all cases where qp is two + // too large. + uint64_t dividend = ((uint64_t(u[j + n]) << 32) + u[j + n - 1]); + // DEBUG(cerr << "KnuthDiv: dividend == " << dividend << '\n'); + uint64_t qp = dividend / v[n - 1]; + uint64_t rp = dividend % v[n - 1]; + if (qp == b || qp * v[n - 2] > b * rp + u[j + n - 2]) { + qp--; + rp += v[n - 1]; + if (rp < b && (qp == b || qp * v[n - 2] > b * rp + u[j + n - 2])) qp--; + } + // DEBUG(cerr << "KnuthDiv: qp == " << qp << ", rp == " << rp << '\n'); + + // D4. [Multiply and subtract.] Replace (u[j+n]u[j+n-1]...u[j]) with + // (u[j+n]u[j+n-1]..u[j]) - qp * (v[n-1]...v[1]v[0]). 
This computation + // consists of a simple multiplication by a one-place number, combined with + // a subtraction. + bool isNeg = false; + for (uint32_t i = 0; i < n; ++i) { + uint64_t u_tmp = uint64_t(u[j + i]) | ((uint64_t(u[j + i + 1])) << 32); + uint64_t subtrahend = uint64_t(qp) * uint64_t(v[i]); + bool borrow = subtrahend > u_tmp; + /*DEBUG(cerr << "KnuthDiv: u_tmp == " << u_tmp + << ", subtrahend == " << subtrahend + << ", borrow = " << borrow << '\n');*/ + + uint64_t result = u_tmp - subtrahend; + uint32_t k = j + i; + u[k++] = (uint32_t)(result & (b - 1)); // subtract low word + u[k++] = (uint32_t)((result) >> 32); // subtract high word + while (borrow && k <= m + n) { // deal with borrow to the left + borrow = u[k] == 0; + u[k]--; + k++; + } + isNeg |= borrow; + /*DEBUG(cerr << "KnuthDiv: u[j+i] == " << u[j+i] << ", u[j+i+1] == " << + u[j+i+1] << '\n');*/ + } + /*DEBUG(cerr << "KnuthDiv: after subtraction:"); + DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << u[i]); + DEBUG(cerr << '\n');*/ + // The digits (u[j+n]...u[j]) should be kept positive; if the result of + // this step is actually negative, (u[j+n]...u[j]) should be left as the + // true value plus b**(n+1), namely as the b's complement of + // the true value, and a "borrow" to the left should be remembered. + // + if (isNeg) { + bool carry = true; // true because b's complement is "complement + 1" + for (uint32_t i = 0; i <= m + n; ++i) { + u[i] = ~u[i] + carry; // b's complement + carry = carry && u[i] == 0; + } + } + /*DEBUG(cerr << "KnuthDiv: after complement:"); + DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << u[i]); + DEBUG(cerr << '\n');*/ + + // D5. [Test remainder.] Set q[j] = qp. If the result of step D4 was + // negative, go to step D6; otherwise go on to step D7. + q[j] = (uint32_t)qp; + if (isNeg) { + // D6. [Add back]. The probability that this step is necessary is very + // small, on the order of only 2/b. Make sure that test data accounts for + // this possibility. 
Decrease q[j] by 1 + q[j]--; + // and add (0v[n-1]...v[1]v[0]) to (u[j+n]u[j+n-1]...u[j+1]u[j]). + // A carry will occur to the left of u[j+n], and it should be ignored + // since it cancels with the borrow that occurred in D4. + bool carry = false; + for (uint32_t i = 0; i < n; i++) { + uint32_t limit = AESL_std::min(u[j + i], v[i]); + u[j + i] += v[i] + carry; + carry = u[j + i] < limit || (carry && u[j + i] == limit); + } + u[j + n] += carry; + } + /*DEBUG(cerr << "KnuthDiv: after correction:"); + DEBUG(for (int i = m+n; i >=0; i--) cerr <<" " << u[i]); + DEBUG(cerr << "\nKnuthDiv: digit result = " << q[j] << '\n');*/ + + // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3. + } while (--j >= 0); + + /*DEBUG(cerr << "KnuthDiv: quotient:"); + DEBUG(for (int i = m; i >=0; i--) cerr <<" " << q[i]); + DEBUG(cerr << '\n');*/ + + // D8. [Unnormalize]. Now q[...] is the desired quotient, and the desired + // remainder may be obtained by dividing u[...] by d. If r is non-null we + // compute the remainder (urem uses this). + if (r) { + // The value d is expressed by the "shift" value above since we avoided + // multiplication by d by using a shift left. So, all we have to do is + // shift right here. 
In order to mak + if (shift) { + uint32_t carry = 0; + // DEBUG(cerr << "KnuthDiv: remainder:"); + for (int i = n - 1; i >= 0; i--) { + r[i] = ((u[i]) >> (shift)) | carry; + carry = (u[i]) << (32 - shift); + // DEBUG(cerr << " " << r[i]); + } + } else { + for (int i = n - 1; i >= 0; i--) { + r[i] = u[i]; + // DEBUG(cerr << " " << r[i]); + } + } + // DEBUG(cerr << '\n'); + } + // DEBUG(cerr << std::setbase(10) << '\n'); +} + +template +void divide(const ap_private<_AP_W, _AP_S>& LHS, uint32_t lhsWords, + const ap_private<_AP_W, _AP_S>& RHS, uint32_t rhsWords, + ap_private<_AP_W, _AP_S>* Quotient, + ap_private<_AP_W, _AP_S>* Remainder) { + assert(lhsWords >= rhsWords && "Fractional result"); + enum { APINT_BITS_PER_WORD = 64 }; + // First, compose the values into an array of 32-bit words instead of + // 64-bit words. This is a necessity of both the "short division" algorithm + // and the the Knuth "classical algorithm" which requires there to be native + // operations for +, -, and * on an m bit value with an m*2 bit result. We + // can't use 64-bit operands here because we don't have native results of + // 128-bits. Furthremore, casting the 64-bit values to 32-bit values won't + // work on large-endian machines. + uint64_t mask = ~0ull >> (sizeof(uint32_t) * 8); + uint32_t n = rhsWords * 2; + uint32_t m = (lhsWords * 2) - n; + + // Allocate space for the temporary values we need either on the stack, if + // it will fit, or on the heap if it won't. + uint32_t SPACE[128]; + uint32_t* __U = 0; + uint32_t* __V = 0; + uint32_t* __Q = 0; + uint32_t* __R = 0; + if ((Remainder ? 
4 : 3) * n + 2 * m + 1 <= 128) { + __U = &SPACE[0]; + __V = &SPACE[m + n + 1]; + __Q = &SPACE[(m + n + 1) + n]; + if (Remainder) __R = &SPACE[(m + n + 1) + n + (m + n)]; + } else { + __U = new uint32_t[m + n + 1]; + __V = new uint32_t[n]; + __Q = new uint32_t[m + n]; + if (Remainder) __R = new uint32_t[n]; + } + + // Initialize the dividend + memset(__U, 0, (m + n + 1) * sizeof(uint32_t)); + for (unsigned i = 0; i < lhsWords; ++i) { + uint64_t tmp = LHS.get_pVal(i); + __U[i * 2] = (uint32_t)(tmp & mask); + __U[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); + } + __U[m + n] = 0; // this extra word is for "spill" in the Knuth algorithm. + + // Initialize the divisor + memset(__V, 0, (n) * sizeof(uint32_t)); + for (unsigned i = 0; i < rhsWords; ++i) { + uint64_t tmp = RHS.get_pVal(i); + __V[i * 2] = (uint32_t)(tmp & mask); + __V[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); + } + + // initialize the quotient and remainder + memset(__Q, 0, (m + n) * sizeof(uint32_t)); + if (Remainder) memset(__R, 0, n * sizeof(uint32_t)); + + // Now, adjust m and n for the Knuth division. n is the number of words in + // the divisor. m is the number of words by which the dividend exceeds the + // divisor (i.e. m+n is the length of the dividend). These sizes must not + // contain any zero words or the Knuth algorithm fails. + for (unsigned i = n; i > 0 && __V[i - 1] == 0; i--) { + n--; + m++; + } + for (unsigned i = m + n; i > 0 && __U[i - 1] == 0; i--) m--; + + // If we're left with only a single word for the divisor, Knuth doesn't work + // so we implement the short division algorithm here. This is much simpler + // and faster because we are certain that we can divide a 64-bit quantity + // by a 32-bit quantity at hardware speed and short division is simply a + // series of such operations. This is just like doing short division but we + // are using base 2^32 instead of base 10. 
+ assert(n != 0 && "Divide by zero?"); + if (n == 1) { + uint32_t divisor = __V[0]; + uint32_t remainder = 0; + for (int i = m + n - 1; i >= 0; i--) { + uint64_t partial_dividend = (uint64_t(remainder)) << 32 | __U[i]; + if (partial_dividend == 0) { + __Q[i] = 0; + remainder = 0; + } else if (partial_dividend < divisor) { + __Q[i] = 0; + remainder = (uint32_t)partial_dividend; + } else if (partial_dividend == divisor) { + __Q[i] = 1; + remainder = 0; + } else { + __Q[i] = (uint32_t)(partial_dividend / divisor); + remainder = (uint32_t)(partial_dividend - (__Q[i] * divisor)); + } + } + if (__R) __R[0] = remainder; + } else { + // Now we're ready to invoke the Knuth classical divide algorithm. In this + // case n > 1. + KnuthDiv(__U, __V, __Q, __R, m, n); + } + + // If the caller wants the quotient + if (Quotient) { + // Set up the Quotient value's memory. + if (Quotient->BitWidth != LHS.BitWidth) { + if (Quotient->isSingleWord()) Quotient->set_VAL(0); + } else + Quotient->clear(); + + // The quotient is in Q. Reconstitute the quotient into Quotient's low + // order words. + if (lhsWords == 1) { + uint64_t tmp = + uint64_t(__Q[0]) | ((uint64_t(__Q[1])) << (APINT_BITS_PER_WORD / 2)); + Quotient->set_VAL(tmp); + } else { + assert(!Quotient->isSingleWord() && + "Quotient ap_private not large enough"); + for (unsigned i = 0; i < lhsWords; ++i) + Quotient->set_pVal( + i, uint64_t(__Q[i * 2]) | + ((uint64_t(__Q[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); + } + Quotient->clearUnusedBits(); + } + + // If the caller wants the remainder + if (Remainder) { + // Set up the Remainder value's memory. + if (Remainder->BitWidth != RHS.BitWidth) { + if (Remainder->isSingleWord()) Remainder->set_VAL(0); + } else + Remainder->clear(); + + // The remainder is in R. Reconstitute the remainder into Remainder's low + // order words. 
+ if (rhsWords == 1) { + uint64_t tmp = + uint64_t(__R[0]) | ((uint64_t(__R[1])) << (APINT_BITS_PER_WORD / 2)); + Remainder->set_VAL(tmp); + } else { + assert(!Remainder->isSingleWord() && + "Remainder ap_private not large enough"); + for (unsigned i = 0; i < rhsWords; ++i) + Remainder->set_pVal( + i, uint64_t(__R[i * 2]) | + ((uint64_t(__R[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); + } + Remainder->clearUnusedBits(); + } + + // Clean up the memory we allocated. + if (__U != &SPACE[0]) { + delete[] __U; + delete[] __V; + delete[] __Q; + delete[] __R; + } +} + +template +void divide(const ap_private<_AP_W, _AP_S>& LHS, uint32_t lhsWords, + uint64_t RHS, ap_private<_AP_W, _AP_S>* Quotient, + ap_private<_AP_W, _AP_S>* Remainder) { + uint32_t rhsWords = 1; + assert(lhsWords >= rhsWords && "Fractional result"); + enum { APINT_BITS_PER_WORD = 64 }; + // First, compose the values into an array of 32-bit words instead of + // 64-bit words. This is a necessity of both the "short division" algorithm + // and the the Knuth "classical algorithm" which requires there to be native + // operations for +, -, and * on an m bit value with an m*2 bit result. We + // can't use 64-bit operands here because we don't have native results of + // 128-bits. Furthremore, casting the 64-bit values to 32-bit values won't + // work on large-endian machines. + uint64_t mask = ~0ull >> (sizeof(uint32_t) * 8); + uint32_t n = 2; + uint32_t m = (lhsWords * 2) - n; + + // Allocate space for the temporary values we need either on the stack, if + // it will fit, or on the heap if it won't. + uint32_t SPACE[128]; + uint32_t* __U = 0; + uint32_t* __V = 0; + uint32_t* __Q = 0; + uint32_t* __R = 0; + if ((Remainder ? 
4 : 3) * n + 2 * m + 1 <= 128) { + __U = &SPACE[0]; + __V = &SPACE[m + n + 1]; + __Q = &SPACE[(m + n + 1) + n]; + if (Remainder) __R = &SPACE[(m + n + 1) + n + (m + n)]; + } else { + __U = new uint32_t[m + n + 1]; + __V = new uint32_t[n]; + __Q = new uint32_t[m + n]; + if (Remainder) __R = new uint32_t[n]; + } + + // Initialize the dividend + memset(__U, 0, (m + n + 1) * sizeof(uint32_t)); + for (unsigned i = 0; i < lhsWords; ++i) { + uint64_t tmp = LHS.get_pVal(i); + __U[i * 2] = tmp & mask; + __U[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); + } + __U[m + n] = 0; // this extra word is for "spill" in the Knuth algorithm. + + // Initialize the divisor + memset(__V, 0, (n) * sizeof(uint32_t)); + __V[0] = RHS & mask; + __V[1] = (RHS) >> (sizeof(uint32_t) * 8); + + // initialize the quotient and remainder + memset(__Q, 0, (m + n) * sizeof(uint32_t)); + if (Remainder) memset(__R, 0, n * sizeof(uint32_t)); + + // Now, adjust m and n for the Knuth division. n is the number of words in + // the divisor. m is the number of words by which the dividend exceeds the + // divisor (i.e. m+n is the length of the dividend). These sizes must not + // contain any zero words or the Knuth algorithm fails. + for (unsigned i = n; i > 0 && __V[i - 1] == 0; i--) { + n--; + m++; + } + for (unsigned i = m + n; i > 0 && __U[i - 1] == 0; i--) m--; + + // If we're left with only a single word for the divisor, Knuth doesn't work + // so we implement the short division algorithm here. This is much simpler + // and faster because we are certain that we can divide a 64-bit quantity + // by a 32-bit quantity at hardware speed and short division is simply a + // series of such operations. This is just like doing short division but we + // are using base 2^32 instead of base 10. 
+ assert(n != 0 && "Divide by zero?"); + if (n == 1) { + uint32_t divisor = __V[0]; + uint32_t remainder = 0; + for (int i = m + n - 1; i >= 0; i--) { + uint64_t partial_dividend = (uint64_t(remainder)) << 32 | __U[i]; + if (partial_dividend == 0) { + __Q[i] = 0; + remainder = 0; + } else if (partial_dividend < divisor) { + __Q[i] = 0; + remainder = partial_dividend; + } else if (partial_dividend == divisor) { + __Q[i] = 1; + remainder = 0; + } else { + __Q[i] = partial_dividend / divisor; + remainder = partial_dividend - (__Q[i] * divisor); + } + } + if (__R) __R[0] = remainder; + } else { + // Now we're ready to invoke the Knuth classical divide algorithm. In this + // case n > 1. + KnuthDiv(__U, __V, __Q, __R, m, n); + } + + // If the caller wants the quotient + if (Quotient) { + // Set up the Quotient value's memory. + if (Quotient->BitWidth != LHS.BitWidth) { + if (Quotient->isSingleWord()) Quotient->set_VAL(0); + } else + Quotient->clear(); + + // The quotient is in Q. Reconstitute the quotient into Quotient's low + // order words. + if (lhsWords == 1) { + uint64_t tmp = + uint64_t(__Q[0]) | ((uint64_t(__Q[1])) << (APINT_BITS_PER_WORD / 2)); + Quotient->set_VAL(tmp); + } else { + assert(!Quotient->isSingleWord() && + "Quotient ap_private not large enough"); + for (unsigned i = 0; i < lhsWords; ++i) + Quotient->set_pVal( + i, uint64_t(__Q[i * 2]) | + ((uint64_t(__Q[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); + } + Quotient->clearUnusedBits(); + } + + // If the caller wants the remainder + if (Remainder) { + // Set up the Remainder value's memory. + if (Remainder->BitWidth != 64 /* RHS.BitWidth */) { + if (Remainder->isSingleWord()) Remainder->set_VAL(0); + } else + Remainder->clear(); + + // The remainder is in __R. Reconstitute the remainder into Remainder's low + // order words. 
+ if (rhsWords == 1) { + uint64_t tmp = + uint64_t(__R[0]) | ((uint64_t(__R[1])) << (APINT_BITS_PER_WORD / 2)); + Remainder->set_VAL(tmp); + } else { + assert(!Remainder->isSingleWord() && + "Remainder ap_private not large enough"); + for (unsigned i = 0; i < rhsWords; ++i) + Remainder->set_pVal( + i, uint64_t(__R[i * 2]) | + ((uint64_t(__R[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); + } + Remainder->clearUnusedBits(); + } + + // Clean up the memory we allocated. + if (__U != &SPACE[0]) { + delete[] __U; + delete[] __V; + delete[] __Q; + delete[] __R; + } +} + +/// @brief Logical right-shift function. +template +INLINE ap_private<_AP_W, _AP_S, _AP_C> lshr( + const ap_private<_AP_W, _AP_S, _AP_C>& LHS, uint32_t shiftAmt) { + return LHS.lshr(shiftAmt); +} + +/// Left-shift the ap_private by shiftAmt. +/// @brief Left-shift function. +template +INLINE ap_private<_AP_W, _AP_S, _AP_C> shl( + const ap_private<_AP_W, _AP_S, _AP_C>& LHS, uint32_t shiftAmt) { + return LHS.shl(shiftAmt); +} + +} // namespace ap_private_ops + +#endif // LLVM_SUPPORT_MATHEXTRAS_H + +/// This enumeration just provides for internal constants used in this +/// translation unit. +enum { + MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified + ///< Note that this must remain synchronized with IntegerType::MIN_INT_BITS + MAX_INT_BITS = (1 << 23) - 1 ///< Maximum number of bits that can be specified + ///< Note that this must remain synchronized with IntegerType::MAX_INT_BITS +}; + +//===----------------------------------------------------------------------===// +// ap_private Class +//===----------------------------------------------------------------------===// + +/// ap_private - This class represents arbitrary precision constant integral +/// values. 
+/// It is a functional replacement for common case unsigned integer type like +/// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width +/// integer sizes and large integer value types such as 3-bits, 15-bits, or more +/// than 64-bits of precision. ap_private provides a variety of arithmetic +/// operators +/// and methods to manipulate integer values of any bit-width. It supports both +/// the typical integer arithmetic and comparison operations as well as bitwise +/// manipulation. +/// +/// The class has several invariants worth noting: +/// * All bit, byte, and word positions are zero-based. +/// * Once the bit width is set, it doesn't change except by the Truncate, +/// SignExtend, or ZeroExtend operations. +/// * All binary operators must be on ap_private instances of the same bit +/// width. +/// Attempting to use these operators on instances with different bit +/// widths will yield an assertion. +/// * The value is stored canonically as an unsigned value. For operations +/// where it makes a difference, there are both signed and unsigned variants +/// of the operation. For example, sdiv and udiv. However, because the bit +/// widths must be the same, operations such as Mul and Add produce the same +/// results regardless of whether the values are interpreted as signed or +/// not. +/// * In general, the class tries to follow the style of computation that LLVM +/// uses in its IR. This simplifies its use for LLVM. +/// +/// @brief Class for arbitrary precision integers. 
+ +#if defined(_MSC_VER) +#if _MSC_VER < 1400 && !defined(for) +#define for if (0); else for +#endif +typedef unsigned __int64 ap_ulong; +typedef signed __int64 ap_slong; +#else +typedef unsigned long long ap_ulong; +typedef signed long long ap_slong; +#endif +template +struct valtype; + +template +struct valtype<_AP_N8, false> { + typedef uint64_t Type; +}; + +template +struct valtype<_AP_N8, true> { + typedef int64_t Type; +}; + +template <> +struct valtype<1, false> { + typedef unsigned char Type; +}; +template <> +struct valtype<2, false> { + typedef unsigned short Type; +}; +template <> +struct valtype<3, false> { + typedef unsigned int Type; +}; +template <> +struct valtype<4, false> { + typedef unsigned int Type; +}; +template <> +struct valtype<1, true> { + typedef signed char Type; +}; +template <> +struct valtype<2, true> { + typedef short Type; +}; +template <> +struct valtype<3, true> { + typedef int Type; +}; +template <> +struct valtype<4, true> { + typedef int Type; +}; + +template +struct ap_private_enable_if {}; +template <> +struct ap_private_enable_if { + static const bool isValid = true; +}; + +// When bitwidth < 64 +template +class ap_private<_AP_W, _AP_S, true> { + // SFINAE pattern. 
Only consider this class when _AP_W <= 64 + const static bool valid = ap_private_enable_if<_AP_W <= 64>::isValid; + +#ifdef _MSC_VER +#pragma warning(disable : 4521 4522) +#endif + public: + typedef typename valtype<(_AP_W + 7) / 8, _AP_S>::Type ValType; + typedef ap_private<_AP_W, _AP_S> Type; + template + struct RType { + enum { + mult_w = _AP_W + _AP_W2, + mult_s = _AP_S || _AP_S2, + plus_w = + AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, + plus_s = _AP_S || _AP_S2, + minus_w = + AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, + minus_s = true, + div_w = _AP_W + _AP_S2, + div_s = _AP_S || _AP_S2, + mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), + mod_s = _AP_S, + logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), + logic_s = _AP_S || _AP_S2 + }; + typedef ap_private mult; + typedef ap_private plus; + typedef ap_private minus; + typedef ap_private logic; + typedef ap_private div; + typedef ap_private mod; + typedef ap_private<_AP_W, _AP_S> arg1; + typedef bool reduce; + }; + enum { APINT_BITS_PER_WORD = sizeof(uint64_t) * 8 }; + enum { + excess_bits = (_AP_W % APINT_BITS_PER_WORD) + ? APINT_BITS_PER_WORD - (_AP_W % APINT_BITS_PER_WORD) + : 0 + }; + static const uint64_t mask = ((uint64_t)~0ULL >> (excess_bits)); + static const uint64_t not_mask = ~mask; + static const uint64_t sign_bit_mask = 1ULL << (APINT_BITS_PER_WORD - 1); + template + struct sign_ext_mask { + static const uint64_t mask = ~0ULL << _AP_W1; + }; + static const int width = _AP_W; + + enum { + BitWidth = _AP_W, + _AP_N = 1, + }; + ValType VAL; ///< Used to store the <= 64 bits integer value. 
+#ifdef AP_CANARY + ValType CANARY; + void check_canary() { assert(CANARY == (ValType)0xDEADBEEFDEADBEEF); } + void set_canary() { CANARY = (ValType)0xDEADBEEFDEADBEEF; } +#else + void check_canary() {} + void set_canary() {} +#endif + + INLINE ValType& get_VAL(void) { return VAL; } + INLINE ValType get_VAL(void) const { return VAL; } + INLINE ValType get_VAL(void) const volatile { return VAL; } + INLINE void set_VAL(uint64_t value) { VAL = (ValType)value; } + INLINE ValType& get_pVal(int i) { return VAL; } + INLINE ValType get_pVal(int i) const { return VAL; } + INLINE const uint64_t* get_pVal() const { + assert(0 && "invalid usage"); + return 0; + } + INLINE ValType get_pVal(int i) const volatile { return VAL; } + INLINE uint64_t* get_pVal() const volatile { + assert(0 && "invalid usage"); + return 0; + } + INLINE void set_pVal(int i, uint64_t value) { VAL = (ValType)value; } + + INLINE uint32_t getBitWidth() const { return BitWidth; } + + template + ap_private<_AP_W, _AP_S>& operator=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + ap_private<_AP_W, _AP_S>& operator=( + const volatile ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(RHS.get_VAL()); // TODO check here about ap_private + clearUnusedBits(); + return *this; + } + + void operator=(const ap_private& RHS) volatile { + // Don't do anything for X = X + VAL = RHS.get_VAL(); // No need to check because no harm done by copying. + clearUnusedBits(); + } + + ap_private& operator=(const ap_private& RHS) { + // Don't do anything for X = X + VAL = RHS.get_VAL(); // No need to check because no harm done by copying. + clearUnusedBits(); + return *this; + } + + void operator=(const volatile ap_private& RHS) volatile { + // Don't do anything for X = X + VAL = RHS.get_VAL(); // No need to check because no harm done by copying. 
+ clearUnusedBits(); + } + + ap_private& operator=(const volatile ap_private& RHS) { + // Don't do anything for X = X + VAL = RHS.get_VAL(); // No need to check because no harm done by copying. + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + *this = ap_private<_AP_W2, false>(op2); + return *this; + } + +#define ASSIGN_OP_FROM_INT(C_TYPE) \ + INLINE ap_private& operator=(const C_TYPE v) { \ + set_canary(); \ + this->VAL = (ValType)v; \ + clearUnusedBits(); \ + check_canary(); \ + return *this; \ + } + +ASSIGN_OP_FROM_INT(bool) +ASSIGN_OP_FROM_INT(char) +ASSIGN_OP_FROM_INT(signed char) +ASSIGN_OP_FROM_INT(unsigned char) +ASSIGN_OP_FROM_INT(short) +ASSIGN_OP_FROM_INT(unsigned short) +ASSIGN_OP_FROM_INT(int) +ASSIGN_OP_FROM_INT(unsigned int) +ASSIGN_OP_FROM_INT(long) +ASSIGN_OP_FROM_INT(unsigned long) +ASSIGN_OP_FROM_INT(ap_slong) +ASSIGN_OP_FROM_INT(ap_ulong) +#if 0 +ASSIGN_OP_FROM_INT(half) +ASSIGN_OP_FROM_INT(float) +ASSIGN_OP_FROM_INT(double) +#endif +#undef ASSIGN_OP_FROM_INT + + // XXX This is a must to prevent pointer being converted to bool. + INLINE ap_private& operator=(const char* s) { + ap_private tmp(s); // XXX direct-initialization, as ctor is explicit. 
+ operator=(tmp); + return *this; + } + + private: + explicit INLINE ap_private(uint64_t* val) : VAL(val[0]) { + set_canary(); + clearUnusedBits(); + check_canary(); + } + + INLINE bool isSingleWord() const { return true; } + + public: + INLINE void fromString(const char* strStart, uint32_t slen, uint8_t radix) { + bool isNeg = strStart[0] == '-'; + if (isNeg) { + strStart++; + slen--; + } + + if (strStart[0] == '0' && (strStart[1] == 'b' || strStart[1] == 'B')) { + //if(radix == 0) radix = 2; + _AP_WARNING(radix != 2, "%s seems to have base %d, but %d given.", strStart, 2, radix); + strStart += 2; + slen -=2; + } else if (strStart[0] == '0' && (strStart[1] == 'o' || strStart[1] == 'O')) { + //if (radix == 0) radix = 8; + _AP_WARNING(radix != 8, "%s seems to have base %d, but %d given.", strStart, 8, radix); + strStart += 2; + slen -=2; + } else if (strStart[0] == '0' && (strStart[1] == 'x' || strStart[1] == 'X')) { + //if (radix == 0) radix = 16; + _AP_WARNING(radix != 16, "%s seems to have base %d, but %d given.", strStart, 16, radix); + strStart += 2; + slen -=2; + } else if (strStart[0] == '0' && (strStart[1] == 'd' || strStart[1] == 'D')) { + //if (radix == 0) radix = 10; + _AP_WARNING(radix != 10, "%s seems to have base %d, but %d given.", strStart, 10, radix); + strStart += 2; + slen -=2; + } else if (radix == 0) { + //radix = 2; // XXX default value + } + + // Check our assumptions here + assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && + "Radix should be 2, 8, 10, or 16!"); + assert(strStart && "String is null?"); + + // Clear bits. + uint64_t tmpVAL = VAL = 0; + + switch (radix) { + case 2: + // sscanf(strStart,"%b",&VAL); + // tmpVAL = *strStart =='1' ? 
~0ULL : 0; + for (; *strStart; ++strStart) { + assert((*strStart == '0' || *strStart == '1') && + ("Wrong binary number")); + tmpVAL <<= 1; + tmpVAL |= (*strStart - '0'); + } + break; + case 8: +#ifdef _MSC_VER + sscanf_s(strStart, "%llo", &tmpVAL, slen + 1); +#else +#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) + sscanf(strStart, "%lo", &tmpVAL); +#else + sscanf(strStart, "%llo", &tmpVAL); +#endif //__x86_64__ +#endif //_MSC_VER + break; + case 10: +#ifdef _MSC_VER + sscanf_s(strStart, "%llu", &tmpVAL, slen + 1); +#else +#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) + sscanf(strStart, "%lu", &tmpVAL); +#else + sscanf(strStart, "%llu", &tmpVAL); +#endif //__x86_64__ +#endif //_MSC_VER + break; + case 16: +#ifdef _MSC_VER + sscanf_s(strStart, "%llx", &tmpVAL, slen + 1); +#else +#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) + sscanf(strStart, "%lx", &tmpVAL); +#else + sscanf(strStart, "%llx", &tmpVAL); +#endif //__x86_64__ +#endif //_MSC_VER + break; + default: + assert(true && "Unknown radix"); + // error + } + VAL = isNeg ? 
(ValType)(-tmpVAL) : (ValType)(tmpVAL); + + clearUnusedBits(); + } + + private: + INLINE ap_private(const std::string& val, uint8_t radix = 2) : VAL(0) { + assert(!val.empty() && "String empty?"); + set_canary(); + fromString(val.c_str(), val.size(), radix); + check_canary(); + } + + INLINE ap_private(const char strStart[], uint32_t slen, uint8_t radix) + : VAL(0) { + set_canary(); + fromString(strStart, slen, radix); + check_canary(); + } + + INLINE ap_private(uint32_t numWords, const uint64_t bigVal[]) + : VAL(bigVal[0]) { + set_canary(); + clearUnusedBits(); + check_canary(); + } + + public: + INLINE ap_private() { + set_canary(); + clearUnusedBits(); + check_canary(); + } + +#define CTOR(TYPE) \ + INLINE ap_private(TYPE v) : VAL((ValType)v) { \ + set_canary(); \ + clearUnusedBits(); \ + check_canary(); \ + } + CTOR(bool) + CTOR(char) + CTOR(signed char) + CTOR(unsigned char) + CTOR(short) + CTOR(unsigned short) + CTOR(int) + CTOR(unsigned int) + CTOR(long) + CTOR(unsigned long) + CTOR(ap_slong) + CTOR(ap_ulong) +#if 0 + CTOR(half) + CTOR(float) + CTOR(double) +#endif +#undef CTOR + + template + INLINE ap_private(const ap_private<_AP_W1, _AP_S1, _AP_OPT>& that) + : VAL((ValType)that.get_VAL()) { + set_canary(); + clearUnusedBits(); + check_canary(); + } + + template + INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, _AP_OPT>& that) + : VAL((ValType)that.get_VAL()) { + set_canary(); + clearUnusedBits(); + check_canary(); + } + + explicit INLINE ap_private(const char* val) { + set_canary(); + unsigned char radix = 10; + std::string str = ap_private_ops::parseString(val, radix); // will set radix. 
+ std::string::size_type pos = str.find('.'); + // trunc all fraction part + if (pos != std::string::npos) str = str.substr(pos); + + ap_private<_AP_W, _AP_S> ap_private_val(str, radix); + operator=(ap_private_val); + check_canary(); + } + + INLINE ap_private(const char* val, signed char rd) { + set_canary(); + unsigned char radix = rd; + std::string str = ap_private_ops::parseString(val, radix); // will set radix. + std::string::size_type pos = str.find('.'); + // trunc all fraction part + if (pos != std::string::npos) str = str.substr(pos); + + ap_private<_AP_W, _AP_S> ap_private_val(str, radix); + operator=(ap_private_val); + check_canary(); + } + + INLINE ~ap_private() { check_canary(); } + + INLINE bool isNegative() const { + static const uint64_t sign_mask = 1ULL << (_AP_W - 1); + return _AP_S && (sign_mask & VAL); + } + + INLINE bool isPositive() const { return !isNegative(); } + + INLINE bool isStrictlyPositive() const { return !isNegative() && VAL != 0; } + + INLINE bool isAllOnesValue() const { return (mask & VAL) == mask; } + + INLINE bool operator==(const ap_private<_AP_W, _AP_S>& RHS) const { + return VAL == RHS.get_VAL(); + } + INLINE bool operator==(const ap_private<_AP_W, !_AP_S>& RHS) const { + return (uint64_t)VAL == (uint64_t)RHS.get_VAL(); + } + + INLINE bool operator==(uint64_t Val) const { return ((uint64_t)VAL == Val); } + INLINE bool operator!=(uint64_t Val) const { return ((uint64_t)VAL != Val); } + INLINE bool operator!=(const ap_private<_AP_W, _AP_S>& RHS) const { + return VAL != RHS.get_VAL(); + } + INLINE bool operator!=(const ap_private<_AP_W, !_AP_S>& RHS) const { + return (uint64_t)VAL != (uint64_t)RHS.get_VAL(); + } + + /// postfix increment. + const ap_private operator++(int) { + ap_private orig(*this); + VAL++; + clearUnusedBits(); + return orig; + } + + /// prefix increment. + const ap_private operator++() { + ++VAL; + clearUnusedBits(); + return *this; + } + + /// postfix decrement. 
+ const ap_private operator--(int) { + ap_private orig(*this); + --VAL; + clearUnusedBits(); + return orig; + } + + /// prefix decrement. + const ap_private operator--() { + --VAL; + clearUnusedBits(); + return *this; + } + + /// one's complement. + INLINE ap_private<_AP_W + !_AP_S, true> operator~() const { + ap_private<_AP_W + !_AP_S, true> Result(*this); + Result.flip(); + return Result; + } + + /// two's complement. + INLINE typename RType<1, false>::minus operator-() const { + return ap_private<1, false>(0) - (*this); + } + + /// logic negation. + INLINE bool operator!() const { return !VAL; } + + INLINE std::string toString(uint8_t radix, bool wantSigned) const; + INLINE std::string toStringUnsigned(uint8_t radix = 10) const { + return toString(radix, false); + } + INLINE std::string toStringSigned(uint8_t radix = 10) const { + return toString(radix, true); + } + INLINE void clear() { VAL = 0; } + INLINE ap_private& clear(uint32_t bitPosition) { + VAL &= ~(1ULL << (bitPosition)); + clearUnusedBits(); + return *this; + } + + INLINE ap_private ashr(uint32_t shiftAmt) const { + if (_AP_S) + return ap_private((shiftAmt == BitWidth) ? 0 + : ((int64_t)VAL) >> (shiftAmt)); + else + return ap_private((shiftAmt == BitWidth) ? 0 + : ((uint64_t)VAL) >> (shiftAmt)); + } + + INLINE ap_private lshr(uint32_t shiftAmt) const { + return ap_private((shiftAmt == BitWidth) + ? ap_private(0) + : ap_private((VAL & mask) >> (shiftAmt))); + } + + INLINE ap_private shl(uint32_t shiftAmt) const +// just for clang compiler +#if defined(__clang__) && !defined(__CLANG_3_1__) + __attribute__((no_sanitize("undefined"))) +#endif + { + if (shiftAmt > BitWidth) { + if (!isNegative()) + return ap_private(0); + else + return ap_private(-1); + } + if (shiftAmt == BitWidth) + return ap_private(0); + else + return ap_private((VAL) << (shiftAmt)); + // return ap_private((shiftAmt == BitWidth) ? 
ap_private(0ULL) : + // ap_private(VAL << shiftAmt)); + } + + INLINE int64_t getSExtValue() const { return VAL; } + + // XXX XXX this function is used in CBE + INLINE uint64_t getZExtValue() const { return VAL & mask; } + + template + INLINE ap_private(const _private_range_ref<_AP_W2, _AP_S2>& ref) { + set_canary(); + *this = ref.get(); + check_canary(); + } + + template + INLINE ap_private(const _private_bit_ref<_AP_W2, _AP_S2>& ref) { + set_canary(); + *this = ((uint64_t)(bool)ref); + check_canary(); + } + +// template +// INLINE ap_private(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { +// set_canary(); +// *this = ref.get(); +// check_canary(); +// } +// +// template +// INLINE ap_private( +// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { +// set_canary(); +// *this = ((val.operator ap_private<_AP_W2, false>())); +// check_canary(); +// } +// +// template +// INLINE ap_private( +// const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { +// set_canary(); +// *this = (uint64_t)(bool)val; +// check_canary(); +// } + + INLINE void write(const ap_private<_AP_W, _AP_S>& op2) volatile { + *this = (op2); + } + + // Explicit conversions to C interger types + //----------------------------------------------------------- + INLINE operator ValType() const { return get_VAL(); } + + INLINE int to_uchar() const { return (unsigned char)get_VAL(); } + + INLINE int to_char() const { return (signed char)get_VAL(); } + + INLINE int to_ushort() const { return (unsigned short)get_VAL(); } + + INLINE int to_short() const { return (short)get_VAL(); } + + INLINE int to_int() const { + // ap_private<64 /* _AP_W */, _AP_S> res(V); + return (int)get_VAL(); + } + + INLINE unsigned to_uint() const { return (unsigned)get_VAL(); } + + INLINE long to_long() const { return (long)get_VAL(); } + + INLINE unsigned long to_ulong() const { return (unsigned long)get_VAL(); } + + INLINE ap_slong to_int64() const { return (ap_slong)get_VAL(); 
} + + INLINE ap_ulong to_uint64() const { return (ap_ulong)get_VAL(); } + + INLINE double to_double() const { + if (isNegative()) + return roundToDouble(true); + else + return roundToDouble(false); + } + + INLINE unsigned length() const { return _AP_W; } + + INLINE bool isMinValue() const { return VAL == 0; } + template + INLINE ap_private& operator&=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(((uint64_t)VAL) & RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator|=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(((uint64_t)VAL) | RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator^=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(((uint64_t)VAL) ^ RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator*=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(((uint64_t)VAL) * RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator+=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(((uint64_t)VAL) + RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator-=(const ap_private<_AP_W1, _AP_S1>& RHS) { + VAL = (ValType)(((uint64_t)VAL) - RHS.get_VAL()); + clearUnusedBits(); + return *this; + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::logic operator&( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { + typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) & + RHS.get_VAL()); + return Ret; + } else { + typename RType<_AP_W1, _AP_S1>::logic Ret = *this; + return Ret & RHS; + } + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::logic operator^( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { + typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) ^ + RHS.get_VAL()); + return Ret; + } 
else { + typename RType<_AP_W1, _AP_S1>::logic Ret = *this; + return Ret ^ RHS; + } + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::logic operator|( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { + typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) | + RHS.get_VAL()); + return Ret; + } else { + typename RType<_AP_W1, _AP_S1>::logic Ret = *this; + return Ret | RHS; + } + } + + INLINE ap_private And(const ap_private& RHS) const { + return ap_private(VAL & RHS.get_VAL()); + } + + INLINE ap_private Or(const ap_private& RHS) const { + return ap_private(VAL | RHS.get_VAL()); + } + + INLINE ap_private Xor(const ap_private& RHS) const { + return ap_private(VAL ^ RHS.get_VAL()); + } +#if 1 + template + INLINE typename RType<_AP_W1, _AP_S1>::mult operator*( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (RType<_AP_W1, _AP_S1>::mult_w <= 64) { + typename RType<_AP_W1, _AP_S1>::mult Result(((uint64_t)VAL) * + RHS.get_VAL()); + return Result; + } else { + typename RType<_AP_W1, _AP_S1>::mult Result(*this); + Result *= RHS; + return Result; + } + } +#endif + INLINE ap_private Mul(const ap_private& RHS) const { + return ap_private(VAL * RHS.get_VAL()); + } + + INLINE ap_private Add(const ap_private& RHS) const { + return ap_private(VAL + RHS.get_VAL()); + } + + INLINE ap_private Sub(const ap_private& RHS) const { + return ap_private(VAL - RHS.get_VAL()); + } + + INLINE ap_private& operator&=(uint64_t RHS) { + VAL &= (ValType)RHS; + clearUnusedBits(); + return *this; + } + INLINE ap_private& operator|=(uint64_t RHS) { + VAL |= (ValType)RHS; + clearUnusedBits(); + return *this; + } + INLINE ap_private& operator^=(uint64_t RHS) { + VAL ^= (ValType)RHS; + clearUnusedBits(); + return *this; + } + INLINE ap_private& operator*=(uint64_t RHS) { + VAL *= (ValType)RHS; + clearUnusedBits(); + return *this; + } + INLINE ap_private& operator+=(uint64_t RHS) { + VAL += (ValType)RHS; + clearUnusedBits(); + return *this; + } 
+ INLINE ap_private& operator-=(uint64_t RHS) { + VAL -= (ValType)RHS; + clearUnusedBits(); + return *this; + } + + INLINE bool isMinSignedValue() const { + static const uint64_t min_mask = ~(~0ULL << (_AP_W - 1)); + return BitWidth == 1 ? VAL == 1 + : (ap_private_ops::isNegative<_AP_W>(*this) && + ((min_mask & VAL) == 0)); + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::plus operator+( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (RType<_AP_W1, _AP_S1>::plus_w <= 64) + return typename RType<_AP_W1, _AP_S1>::plus( + RType<_AP_W1, _AP_S1>::plus_s + ? int64_t(((uint64_t)VAL) + RHS.get_VAL()) + : uint64_t(((uint64_t)VAL) + RHS.get_VAL())); + typename RType<_AP_W1, _AP_S1>::plus Result = RHS; + Result += VAL; + return Result; + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::minus operator-( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (RType<_AP_W1, _AP_S1>::minus_w <= 64) + return typename RType<_AP_W1, _AP_S1>::minus( + int64_t(((uint64_t)VAL) - RHS.get_VAL())); + typename RType<_AP_W1, _AP_S1>::minus Result = *this; + Result -= RHS; + return Result; + } + + INLINE uint32_t countPopulation() const { + return ap_private_ops::CountPopulation_64(VAL); + } + INLINE uint32_t countLeadingZeros() const { + int remainder = BitWidth % 64; + int excessBits = (64 - remainder) % 64; + uint32_t Count = ap_private_ops::CountLeadingZeros_64(VAL); + if (Count) Count -= excessBits; + return AESL_std::min(Count, (uint32_t)_AP_W); + } + + /// HiBits - This function returns the high "numBits" bits of this ap_private. + INLINE ap_private<_AP_W, _AP_S> getHiBits(uint32_t numBits) const { + ap_private<_AP_W, _AP_S> ret(*this); + ret = (ret) >> (BitWidth - numBits); + return ret; + } + + /// LoBits - This function returns the low "numBits" bits of this ap_private. 
+ INLINE ap_private<_AP_W, _AP_S> getLoBits(uint32_t numBits) const { + ap_private<_AP_W, _AP_S> ret(((uint64_t)VAL) << (BitWidth - numBits)); + ret = (ret) >> (BitWidth - numBits); + return ret; + // return ap_private(numBits, (VAL << (BitWidth - numBits))>> (BitWidth - + // numBits)); + } + + INLINE ap_private<_AP_W, _AP_S>& set(uint32_t bitPosition) { + VAL |= (1ULL << (bitPosition)); + clearUnusedBits(); + return *this; // clearUnusedBits(); + } + + INLINE void set() { + VAL = (ValType)~0ULL; + clearUnusedBits(); + } + + template + INLINE void set(const ap_private<_AP_W3, false>& val) { + operator=(ap_private<_AP_W3, _AP_S>(val)); + } + + INLINE void set(const ap_private& val) { operator=(val); } + + INLINE void clearUnusedBits(void) volatile +// just for clang compiler +#if defined(__clang__) && !defined(__CLANG_3_1__) + __attribute__((no_sanitize("undefined"))) +#endif + { + enum { excess_bits = (_AP_W % 64) ? 64 - _AP_W % 64 : 0 }; + VAL = (ValType)( + _AP_S + ? ((((int64_t)VAL) << (excess_bits)) >> (excess_bits)) + : (excess_bits ? (((uint64_t)VAL) << (excess_bits)) >> (excess_bits) + : (uint64_t)VAL)); + } + + INLINE void clearUnusedBitsToZero(void) { + enum { excess_bits = (_AP_W % 64) ? 64 - _AP_W % 64 : 0 }; + static uint64_t mask = ~0ULL >> (excess_bits); + VAL &= mask; + } + + INLINE ap_private udiv(const ap_private& RHS) const { + return ap_private((uint64_t)VAL / RHS.get_VAL()); + } + + /// Signed divide this ap_private by ap_private RHS. + /// @brief Signed division function for ap_private. 
+ INLINE ap_private sdiv(const ap_private& RHS) const { + if (isNegative()) + if (RHS.isNegative()) + return ((uint64_t)(0 - (*this))) / (uint64_t)(0 - RHS); + else + return 0 - ((uint64_t)(0 - (*this)) / (uint64_t)(RHS)); + else if (RHS.isNegative()) + return 0 - (this->udiv((ap_private)(0 - RHS))); + return this->udiv(RHS); + } + + template + INLINE ap_private urem(const ap_private<_AP_W, _AP_S2>& RHS) const { + assert(RHS.get_VAL() != 0 && "Divide by 0"); + return ap_private(((uint64_t)VAL) % ((uint64_t)RHS.get_VAL())); + } + + /// Signed remainder operation on ap_private. + /// @brief Function for signed remainder operation. + template + INLINE ap_private srem(const ap_private<_AP_W, _AP_S2>& RHS) const { + if (isNegative()) { + ap_private lhs = 0 - (*this); + if (RHS.isNegative()) { + ap_private rhs = 0 - RHS; + return 0 - (lhs.urem(rhs)); + } else + return 0 - (lhs.urem(RHS)); + } else if (RHS.isNegative()) { + ap_private rhs = 0 - RHS; + return this->urem(rhs); + } + return this->urem(RHS); + } + + template + INLINE bool eq(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return (*this) == RHS; + } + + template + INLINE bool ne(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return !((*this) == RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// the validity of the less-than relationship. + /// @returns true if *this < RHS when both are considered unsigned. + /// @brief Unsigned less than comparison + template + INLINE bool ult(const ap_private<_AP_W1, _AP_S1>& RHS) const { + if (_AP_W1 <= 64) { + uint64_t lhsZext = ((uint64_t(VAL)) << (64 - _AP_W)) >> (64 - _AP_W); + uint64_t rhsZext = + ((uint64_t(RHS.get_VAL())) << (64 - _AP_W1)) >> (64 - _AP_W1); + return lhsZext < rhsZext; + } else + return RHS.uge(*this); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the less-than relationship. + /// @returns true if *this < RHS when both are considered signed. 
+ /// @brief Signed less than comparison + template + INLINE bool slt(const ap_private<_AP_W1, _AP_S1>& RHS) const +// just for clang compiler +#if defined(__clang__) && !defined(__CLANG_3_1__) + __attribute__((no_sanitize("undefined"))) +#endif + { + if (_AP_W1 <= 64) { + int64_t lhsSext = ((int64_t(VAL)) << (64 - _AP_W)) >> (64 - _AP_W); + int64_t rhsSext = + ((int64_t(RHS.get_VAL())) << (64 - _AP_W1)) >> (64 - _AP_W1); + return lhsSext < rhsSext; + } else + return RHS.sge(*this); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// validity of the less-or-equal relationship. + /// @returns true if *this <= RHS when both are considered unsigned. + /// @brief Unsigned less or equal comparison + template + INLINE bool ule(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return ult(RHS) || eq(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the less-or-equal relationship. + /// @returns true if *this <= RHS when both are considered signed. + /// @brief Signed less or equal comparison + template + INLINE bool sle(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return slt(RHS) || eq(RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// the validity of the greater-than relationship. + /// @returns true if *this > RHS when both are considered unsigned. + /// @brief Unsigned greather than comparison + template + INLINE bool ugt(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return !ult(RHS) && !eq(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// the validity of the greater-than relationship. + /// @returns true if *this > RHS when both are considered signed. 
+ /// @brief Signed greather than comparison + template + INLINE bool sgt(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return !slt(RHS) && !eq(RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// validity of the greater-or-equal relationship. + /// @returns true if *this >= RHS when both are considered unsigned. + /// @brief Unsigned greater or equal comparison + template + INLINE bool uge(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return !ult(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the greater-or-equal relationship. + /// @returns true if *this >= RHS when both are considered signed. + /// @brief Signed greather or equal comparison + template + INLINE bool sge(const ap_private<_AP_W1, _AP_S1>& RHS) const { + return !slt(RHS); + } + + INLINE ap_private abs() const { + if (isNegative()) return -(*this); + return *this; + } + + INLINE ap_private<_AP_W, false> get() const { + ap_private<_AP_W, false> ret(*this); + return ret; + } + + INLINE static uint32_t getBitsNeeded(const char* str, uint32_t slen, + uint8_t radix) { + return _AP_W; + } + + INLINE uint32_t getActiveBits() const { + uint32_t bits = _AP_W - countLeadingZeros(); + return bits ? bits : 1; + } + + INLINE double roundToDouble(bool isSigned = false) const { + return isSigned ? double((int64_t)VAL) : double((uint64_t)VAL); + } + + /*Reverse the contents of ap_private instance. I.e. 
LSB becomes MSB and vise + * versa*/ + INLINE ap_private& reverse() { + for (int i = 0; i < _AP_W / 2; ++i) { + bool tmp = operator[](i); + if (operator[](_AP_W - 1 - i)) + set(i); + else + clear(i); + if (tmp) + set(_AP_W - 1 - i); + else + clear(_AP_W - 1 - i); + } + clearUnusedBits(); + return *this; + } + + /*Return true if the value of ap_private instance is zero*/ + INLINE bool iszero() const { return isMinValue(); } + + INLINE bool to_bool() const { return !iszero(); } + + /* x < 0 */ + INLINE bool sign() const { + if (isNegative()) return true; + return false; + } + + /* x[i] = !x[i] */ + INLINE void invert(int i) { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + flip(i); + } + + /* x[i] */ + INLINE bool test(int i) const { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + return operator[](i); + } + + // This is used for sc_lv and sc_bv, which is implemented by sc_uint + // Rotate an ap_private object n places to the left + INLINE void lrotate(int n) { + assert(n >= 0 && "Attempting to shift negative index"); + assert(n < _AP_W && "Shift value larger than bit width"); + operator=(shl(n) | lshr(_AP_W - n)); + } + + // This is used for sc_lv and sc_bv, which is implemented by sc_uint + // Rotate an ap_private object n places to the right + INLINE void rrotate(int n) { + assert(n >= 0 && "Attempting to shift negative index"); + assert(n < _AP_W && "Shift value larger than bit width"); + operator=(lshr(n) | shl(_AP_W - n)); + } + + // Set the ith bit into v + INLINE void set(int i, bool v) { + assert(i >= 0 && "Attempting to write bit with negative index"); + assert(i < _AP_W && "Attempting to write bit beyond MSB"); + v ? 
set(i) : clear(i); + } + + // Set the ith bit into v + INLINE void set_bit(int i, bool v) { + assert(i >= 0 && "Attempting to write bit with negative index"); + assert(i < _AP_W && "Attempting to write bit beyond MSB"); + v ? set(i) : clear(i); + } + + // Get the value of ith bit + INLINE bool get_bit(int i) const { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + return (((1ULL << i) & VAL) != 0); + } + + /// Toggle all bits. + INLINE ap_private& flip() { + VAL = (ValType)((~0ULL ^ VAL) & mask); + clearUnusedBits(); + return *this; + } + + /// Toggles a given bit to its opposite value. + INLINE ap_private& flip(uint32_t bitPosition) { + assert(bitPosition < BitWidth && "Out of the bit-width range!"); + set_bit(bitPosition, !get_bit(bitPosition)); + return *this; + } + + // complements every bit + INLINE void b_not() { flip(); } + +// Binary Arithmetic +//----------------------------------------------------------- +#define OP_BIN_AP(Sym, Rty, Fun) \ + template \ + INLINE typename RType<_AP_W2, _AP_S2>::Rty operator Sym( \ + const ap_private<_AP_W2, _AP_S2>& op) const { \ + typename RType<_AP_W2, _AP_S2>::Rty lhs(*this); \ + typename RType<_AP_W2, _AP_S2>::Rty rhs(op); \ + return lhs.Fun(rhs); \ + } + +/// Bitwise and, or, xor +// OP_BIN_AP(&,logic, And) +// OP_BIN_AP(|,logic, Or) +// OP_BIN_AP(^,logic, Xor) +#undef OP_BIN_AP + + template + INLINE typename RType<_AP_W2, _AP_S2>::div operator/( + const ap_private<_AP_W2, _AP_S2>& op) const { + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> + lhs = *this; + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> + rhs = op; + return typename RType<_AP_W2, _AP_S2>::div( + (_AP_S || _AP_S2) ? lhs.sdiv(rhs) : lhs.udiv(rhs)); + } + + template + INLINE typename RType<_AP_W2, _AP_S2>::mod operator%( + const ap_private<_AP_W2, _AP_S2>& op) const { + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? 
_AP_S2 : _AP_S || _AP_S2))> + lhs = *this; + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> + rhs = op; + typename RType<_AP_W2, _AP_S2>::mod res = + typename RType<_AP_W2, _AP_S2>::mod(_AP_S ? lhs.srem(rhs) + : lhs.urem(rhs)); + return res; + } + +#define OP_ASSIGN_AP_2(Sym) \ + template \ + INLINE ap_private<_AP_W, _AP_S>& operator Sym##=( \ + const ap_private<_AP_W2, _AP_S2>& op) { \ + *this = operator Sym(op); \ + return *this; \ + } + + OP_ASSIGN_AP_2(/) + OP_ASSIGN_AP_2(%) +#undef OP_ASSIGN_AP_2 + +/// Bitwise assign: and, or, xor +//------------------------------------------------------------- +// OP_ASSIGN_AP(&) +// OP_ASSIGN_AP(^) +// OP_ASSIGN_AP(|) + +#define OP_LEFT_SHIFT_CTYPE(TYPE, SIGNED) \ + INLINE ap_private operator<<(const TYPE op) const { \ + if (op >= _AP_W) return ap_private(0); \ + if (SIGNED && op < 0) return *this >> (0 - op); \ + return shl(op); \ + } + + // OP_LEFT_SHIFT_CTYPE(bool, false) + OP_LEFT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) + OP_LEFT_SHIFT_CTYPE(signed char, true) + OP_LEFT_SHIFT_CTYPE(unsigned char, false) + OP_LEFT_SHIFT_CTYPE(short, true) + OP_LEFT_SHIFT_CTYPE(unsigned short, false) + OP_LEFT_SHIFT_CTYPE(int, true) + OP_LEFT_SHIFT_CTYPE(unsigned int, false) + OP_LEFT_SHIFT_CTYPE(long, true) + OP_LEFT_SHIFT_CTYPE(unsigned long, false) + OP_LEFT_SHIFT_CTYPE(long long, true) + OP_LEFT_SHIFT_CTYPE(unsigned long long, false) +#if 0 + OP_LEFT_SHIFT_CTYPE(half, false) + OP_LEFT_SHIFT_CTYPE(float, false) + OP_LEFT_SHIFT_CTYPE(double, false) +#endif + +#undef OP_LEFT_SHIFT_CTYPE + + template + INLINE ap_private operator<<(const ap_private<_AP_W2, _AP_S2>& op2) const { + if (_AP_S2 == false) { + uint32_t sh = op2.to_uint(); + return *this << sh; + } else { + int sh = op2.to_int(); + return *this << sh; + } + } + +#define OP_RIGHT_SHIFT_CTYPE(TYPE, SIGNED) \ + INLINE ap_private operator>>(const TYPE op) const { \ + if (op >= _AP_W) { \ + if (isNegative()) \ + return ap_private(-1); \ + else \ + return 
ap_private(0); \ + } \ + if ((SIGNED) && op < 0) return *this << (0 - op); \ + if (_AP_S) \ + return ashr(op); \ + else \ + return lshr(op); \ + } + + // OP_RIGHT_SHIFT_CTYPE(bool, false) + OP_RIGHT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) + OP_RIGHT_SHIFT_CTYPE(signed char, true) + OP_RIGHT_SHIFT_CTYPE(unsigned char, false) + OP_RIGHT_SHIFT_CTYPE(short, true) + OP_RIGHT_SHIFT_CTYPE(unsigned short, false) + OP_RIGHT_SHIFT_CTYPE(int, true) + OP_RIGHT_SHIFT_CTYPE(unsigned int, false) + OP_RIGHT_SHIFT_CTYPE(long, true) + OP_RIGHT_SHIFT_CTYPE(unsigned long, false) + OP_RIGHT_SHIFT_CTYPE(unsigned long long, false) + OP_RIGHT_SHIFT_CTYPE(long long, true) +#if 0 + OP_RIGHT_SHIFT_CTYPE(half, false) + OP_RIGHT_SHIFT_CTYPE(float, false) + OP_RIGHT_SHIFT_CTYPE(double, false) +#endif + +#undef OP_RIGHT_SHIFT_CTYPE + + template + INLINE ap_private operator>>(const ap_private<_AP_W2, _AP_S2>& op2) const { + if (_AP_S2 == false) { + uint32_t sh = op2.to_uint(); + return *this >> sh; + } else { + int sh = op2.to_int(); + return *this >> sh; + } + } + + /// Shift assign + //----------------------------------------------------------------- + + //INLINE const ap_private& operator<<=(uint32_t shiftAmt) { + // VAL <<= shiftAmt; + // clearUnusedBits(); + // return *this; + //} + +#define OP_ASSIGN_AP(Sym) \ + template \ + INLINE ap_private& operator Sym##=(int op) { \ + *this = operator Sym(op); \ + clearUnusedBits(); \ + return *this; \ + } \ + INLINE ap_private& operator Sym##=(unsigned int op) { \ + *this = operator Sym(op); \ + clearUnusedBits(); \ + return *this; \ + } \ + template \ + INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ + *this = operator Sym(op); \ + clearUnusedBits(); \ + return *this; \ + } + + OP_ASSIGN_AP(>>) + OP_ASSIGN_AP(<<) +#undef OP_ASSIGN_AP + + /// Comparisons + //----------------------------------------------------------------- + template + INLINE bool operator==(const ap_private<_AP_W1, _AP_S1>& op) const { + enum { _AP_MAX_W = 
AP_MAX(AP_MAX(_AP_W, _AP_W1), 32) }; + ap_private<_AP_MAX_W, false> lhs(*this); + ap_private<_AP_MAX_W, false> rhs(op); + if (_AP_MAX_W <= 64) { + return (uint64_t)lhs.get_VAL() == (uint64_t)rhs.get_VAL(); + } else + return lhs == rhs; + } + + template + INLINE bool operator!=(const ap_private<_AP_W2, _AP_S2>& op) const { + return !(*this == op); + } + + template + INLINE bool operator>(const ap_private<_AP_W2, _AP_S2>& op) const { + enum { + _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) + }; + ap_private<_AP_MAX_W, _AP_S> lhs(*this); + ap_private<_AP_MAX_W, _AP_S2> rhs(op); + // this will follow gcc rule for comparison + // between different bitwidth and signness + if (_AP_S == _AP_S2) + return _AP_S ? lhs.sgt(rhs) : lhs.ugt(rhs); + else if (_AP_W < 32 && _AP_W2 < 32) + // different signness but both bitwidth is less than 32 + return lhs.sgt(rhs); + else + // different signness but bigger bitwidth + // is greater or equal to 32 + if (_AP_S) + if (_AP_W2 >= _AP_W) + return lhs.ugt(rhs); + else + return lhs.sgt(rhs); + else if (_AP_W >= _AP_W2) + return lhs.ugt(rhs); + else + return lhs.sgt(rhs); + } + + template + INLINE bool operator<=(const ap_private<_AP_W2, _AP_S2>& op) const { + return !(*this > op); + } + + template + INLINE bool operator<(const ap_private<_AP_W2, _AP_S2>& op) const { + enum { + _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) + }; + ap_private<_AP_MAX_W, _AP_S> lhs(*this); + ap_private<_AP_MAX_W, _AP_S2> rhs(op); + if (_AP_S == _AP_S2) + return _AP_S ? 
lhs.slt(rhs) : lhs.ult(rhs); + else if (_AP_W < 32 && _AP_W2 < 32) + return lhs.slt(rhs); + else if (_AP_S) + if (_AP_W2 >= _AP_W) + return lhs.ult(rhs); + else + return lhs.slt(rhs); + else if (_AP_W >= _AP_W2) + return lhs.ult(rhs); + else + return lhs.slt(rhs); + } + + template + INLINE bool operator>=(const ap_private<_AP_W2, _AP_S2>& op) const { + return !(*this < op); + } + + /// Bit and Part Select + //-------------------------------------------------------------- + // FIXME now _private_range_ref refs to _AP_ROOT_TYPE(struct ssdm_int). + INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { + return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { + return _private_range_ref<_AP_W, _AP_S>( + const_cast*>(this), Hi, Lo); + } + + INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { + return _private_range_ref<_AP_W, _AP_S>( + (const_cast*>(this)), Hi, Lo); + } + + INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { + return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + INLINE _private_bit_ref<_AP_W, _AP_S> operator[](int index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index); + } + + template + INLINE _private_bit_ref<_AP_W, _AP_S> operator[]( + const ap_private<_AP_W2, _AP_S2>& index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); + } + + INLINE const _private_bit_ref<_AP_W, _AP_S> operator[](int index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index); + } + + template + INLINE const _private_bit_ref<_AP_W, _AP_S> operator[]( + const ap_private<_AP_W2, _AP_S2>& index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index.to_int()); + } + + INLINE _private_bit_ref<_AP_W, _AP_S> bit(int index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index); + } + + template + INLINE _private_bit_ref<_AP_W, _AP_S> bit(const ap_private<_AP_W2, _AP_S2>& 
index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); + } + + INLINE const _private_bit_ref<_AP_W, _AP_S> bit(int index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index); + } + + template + INLINE const _private_bit_ref<_AP_W, _AP_S> bit( + const ap_private<_AP_W2, _AP_S2>& index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index.to_int()); + } + +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> > +// concat(const ap_private<_AP_W2, _AP_S2>& a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> > +// concat(ap_private<_AP_W2, _AP_S2>& a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(const ap_private<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(const ap_private<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// *this, const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// const_cast&>(*this), a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// 
operator,(ap_private<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> > +// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> > +// operator,(_private_range_ref<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> > +// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> > +// operator,(_private_bit_ref<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > +// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( +// const_cast&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, 
+// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > +// operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, +// a2); +// } +// +// template +// INLINE ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> +// &a2) const { +// return ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// const_cast&>(*this), +// const_cast< +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { +// return ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, +// a2); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> +// &a2) const { +// return ap_concat_ref< +// _AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// const_cast&>(*this), +// const_cast&>( +// a2)); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,( +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { +// return ap_concat_ref< +// _AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); +// } +// +// template +// INLINE ap_private operator&( +// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { +// 
return *this & a2.get(); +// } +// +// template +// INLINE ap_private operator|( +// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { +// return *this | a2.get(); +// } +// +// template +// INLINE ap_private operator^( +// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { +// return *this ^ a2.get(); +// } + + // Reduce operation + //----------------------------------------------------------- + INLINE bool and_reduce() const { return (VAL & mask) == mask; } + + INLINE bool nand_reduce() const { return (VAL & mask) != mask; } + + INLINE bool or_reduce() const { return (bool)VAL; } + + INLINE bool nor_reduce() const { return VAL == 0; } + + INLINE bool xor_reduce() const { + unsigned int i = countPopulation(); + return (i % 2) ? true : false; + } + + INLINE bool xnor_reduce() const { + unsigned int i = countPopulation(); + return (i % 2) ? false : true; + } + + INLINE std::string to_string(uint8_t radix = 2, bool sign = false) const { + return toString(radix, radix == 10 ? _AP_S : sign); + } +}; // End of class ap_private <_AP_W, _AP_S, true> + +template +std::string ap_private<_AP_W, _AP_S, true>::toString(uint8_t radix, + bool wantSigned) const { + assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && + "Radix should be 2, 8, 10, or 16!"); + static const char* digits[] = {"0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "a", "b", "c", "d", "e", "f"}; + std::string result; + if (radix != 10) { + // For the 2, 8 and 16 bit cases, we can just shift instead of divide + // because the number of bits per digit (1,3 and 4 respectively) divides + // equaly. We just shift until there value is zero. + + // First, check for a zero value and just short circuit the logic below. + if (*this == (uint64_t)(0)) { + // Always generate a radix indicator because fixed-point + // formats require it. 
+ switch (radix) { + case 2: + result = "0b0"; + break; + case 8: + result = "0o0"; + break; + case 16: + result = "0x0"; + break; + default: + assert("invalid radix" && 0); + } + } else { + ap_private<_AP_W, false, true> tmp(*this); + size_t insert_at = 0; + bool leading_zero = true; + if (wantSigned && isNegative()) { + // They want to print the signed version and it is a negative value + // Flip the bits and add one to turn it into the equivalent positive + // value and put a '-' in the result. + tmp.flip(); + tmp++; + result = "-"; + insert_at = 1; + leading_zero = false; + } + switch (radix) { + case 2: + result += "0b"; + break; + case 8: + result += "0o"; + break; + case 16: + result += "0x"; + break; + default: + assert("invalid radix" && 0); + } + insert_at += 2; + + // Just shift tmp right for each digit width until it becomes zero + uint32_t shift = (radix == 16 ? 4 : (radix == 8 ? 3 : 1)); + uint64_t mask = radix - 1; + ap_private<_AP_W, false, true> zero(0); + unsigned bits = 0; + bool msb = false; + while (tmp.ne(zero)) { + unsigned digit = (unsigned)(tmp.get_VAL() & mask); + result.insert(insert_at, digits[digit]); + tmp = tmp.lshr(shift); + bits++; + msb = (digit >> (shift - 1)) == 1; + } + bits *= shift; + if (bits < _AP_W && leading_zero && msb) + result.insert(insert_at, digits[0]); + } + return result; + } + + ap_private<_AP_W, false, true> tmp(*this); + ap_private<6, false, true> divisor(radix); + ap_private<_AP_W, _AP_S, true> zero(0); + size_t insert_at = 0; + if (wantSigned && isNegative()) { + // They want to print the signed version and it is a negative value + // Flip the bits and add one to turn it into the equivalent positive + // value and put a '-' in the result. 
+ tmp.flip(); + tmp++; + result = "-"; + insert_at = 1; + } + if (tmp == ap_private<_AP_W, false, true>(0ULL)) + result = "0"; + else + while (tmp.ne(zero)) { + ap_private<_AP_W, false, true> APdigit = tmp % divisor; + ap_private<_AP_W, false, true> tmp2 = tmp / divisor; + uint32_t digit = (uint32_t)(APdigit.getZExtValue()); + assert(digit < radix && "divide failed"); + result.insert(insert_at, digits[digit]); + tmp = tmp2; + } + return result; + +} // End of ap_private<_AP_W, _AP_S, true>::toString() + +// bitwidth > 64 +template +class ap_private<_AP_W, _AP_S, false> { + // SFINAE pattern. Only consider this class when _AP_W > 64 + const static bool valid = ap_private_enable_if<(_AP_W > 64)>::isValid; + +#ifdef _MSC_VER +#pragma warning(disable : 4521 4522) +#endif + public: + enum { BitWidth = _AP_W, _AP_N = (_AP_W + 63) / 64 }; + static const int width = _AP_W; + + private: + /// This constructor is used only internally for speed of construction of + /// temporaries. It is unsafe for general use so it is not public. + + /* Constructors */ + /// Note that numWords can be smaller or larger than the corresponding bit + /// width but any extraneous bits will be dropped. + /// @param numWords the number of words in bigVal + /// @param bigVal a sequence of words to form the initial value of the + /// ap_private + /// @brief Construct an ap_private, initialized as bigVal[]. + INLINE ap_private(uint32_t numWords, const uint64_t bigVal[]) { + set_canary(); + assert(bigVal && "Null pointer detected!"); + { + // Get memory, cleared to 0 + memset(pVal, 0, _AP_N * sizeof(uint64_t)); + + // Calculate the number of words to copy + uint32_t words = AESL_std::min(numWords, _AP_N); + // Copy the words from bigVal to pVal + memcpy(pVal, bigVal, words * APINT_WORD_SIZE); + if (words >= _AP_W) clearUnusedBits(); + // Make sure unused high bits are cleared + } + check_canary(); + } + + /// This constructor interprets Val as a string in the given radix. 
The + /// interpretation stops when the first charater that is not suitable for the + /// radix is encountered. Acceptable radix values are 2, 8, 10 and 16. It is + /// an error for the value implied by the string to require more bits than + /// numBits. + /// @param val the string to be interpreted + /// @param radix the radix of Val to use for the intepretation + /// @brief Construct an ap_private from a string representation. + INLINE ap_private(const std::string& val, uint8_t radix = 2) { + set_canary(); + assert(!val.empty() && "The input string is empty."); + const char* c_str = val.c_str(); + fromString(c_str, val.size(), radix); + check_canary(); + } + + /// This constructor interprets the slen characters starting at StrStart as + /// a string in the given radix. The interpretation stops when the first + /// character that is not suitable for the radix is encountered. Acceptable + /// radix values are 2, 8, 10 and 16. It is an error for the value implied by + /// the string to require more bits than numBits. + /// @param strStart the start of the string to be interpreted + /// @param slen the maximum number of characters to interpret + /// @param radix the radix to use for the conversion + /// @brief Construct an ap_private from a string representation. + /// This method does not consider whether it is negative or not. + INLINE ap_private(const char strStart[], uint32_t slen, uint8_t radix) { + set_canary(); + fromString(strStart, slen, radix); + check_canary(); + } + + INLINE void report() { + _AP_ERROR(_AP_W > MAX_MODE(AP_INT_MAX_W) * 1024, + "ap_%sint<%d>: Bitwidth exceeds the " + "default max value %d. Please use macro " + "AP_INT_MAX_W to set a larger max value.", + _AP_S ? "" : "u", _AP_W, MAX_MODE(AP_INT_MAX_W) * 1024); + } + /// This union is used to store the integer value. When the + /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. + + /// This enum is used to hold the constants we needed for ap_private. 
+ // uint64_t VAL; ///< Used to store the <= 64 bits integer value. + uint64_t pVal[_AP_N]; ///< Used to store the >64 bits integer value. +#ifdef AP_CANARY + uint64_t CANARY; + INLINE void check_canary() { assert(CANARY == (uint64_t)0xDEADBEEFDEADBEEF); } + INLINE void set_canary() { CANARY = (uint64_t)0xDEADBEEFDEADBEEF; } +#else + INLINE void check_canary() {} + INLINE void set_canary() {} +#endif + + public: + typedef typename valtype<8, _AP_S>::Type ValType; + typedef ap_private<_AP_W, _AP_S> Type; + // FIXME remove friend type? + template + friend struct ap_fixed_base; + /// return type of variety of operations + //---------------------------------------------------------- + template + struct RType { + enum { + mult_w = _AP_W + _AP_W2, + mult_s = _AP_S || _AP_S2, + plus_w = + AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, + plus_s = _AP_S || _AP_S2, + minus_w = + AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, + minus_s = true, + div_w = _AP_W + _AP_S2, + div_s = _AP_S || _AP_S2, + mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), + mod_s = _AP_S, + logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), + logic_s = _AP_S || _AP_S2 + }; + typedef ap_private mult; + typedef ap_private plus; + typedef ap_private minus; + typedef ap_private logic; + typedef ap_private div; + typedef ap_private mod; + typedef ap_private<_AP_W, _AP_S> arg1; + typedef bool reduce; + }; + + INLINE uint64_t& get_VAL(void) { return pVal[0]; } + INLINE uint64_t get_VAL(void) const { return pVal[0]; } + INLINE uint64_t get_VAL(void) const volatile { return pVal[0]; } + INLINE void set_VAL(uint64_t value) { pVal[0] = value; } + INLINE uint64_t& get_pVal(int index) { return pVal[index]; } + INLINE uint64_t* get_pVal() { return pVal; } + INLINE const uint64_t* get_pVal() const { return pVal; } + INLINE uint64_t get_pVal(int index) const { return pVal[index]; } + INLINE uint64_t* get_pVal() const volatile { return pVal; } + 
INLINE uint64_t get_pVal(int index) const volatile { return pVal[index]; } + INLINE void set_pVal(int i, uint64_t value) { pVal[i] = value; } + + /// This enum is used to hold the constants we needed for ap_private. + enum { + APINT_BITS_PER_WORD = sizeof(uint64_t) * 8, ///< Bits in a word + APINT_WORD_SIZE = sizeof(uint64_t) ///< Byte size of a word + }; + + enum { + excess_bits = (_AP_W % APINT_BITS_PER_WORD) + ? APINT_BITS_PER_WORD - (_AP_W % APINT_BITS_PER_WORD) + : 0 + }; + static const uint64_t mask = ((uint64_t)~0ULL >> (excess_bits)); + + public: + // NOTE changed to explicit to be consistent with ap_private + explicit INLINE ap_private(const char* val) { + set_canary(); + unsigned char radix = 10; + std::string str = ap_private_ops::parseString(val, radix); // determine radix. + std::string::size_type pos = str.find('.'); + if (pos != std::string::npos) str = str.substr(pos); + ap_private ap_private_val(str, radix); + operator=(ap_private_val); + report(); + check_canary(); + } + + INLINE ap_private(const char* val, unsigned char rd) { + set_canary(); + unsigned char radix = rd; + std::string str = ap_private_ops::parseString(val, radix); // determine radix. 
+ std::string::size_type pos = str.find('.'); + if (pos != std::string::npos) str = str.substr(pos); + ap_private ap_private_val(str, radix); + operator=(ap_private_val); + report(); + + report(); + check_canary(); + } + + template + INLINE ap_private(const _private_range_ref<_AP_W2, _AP_S2>& ref) { + set_canary(); + *this = ref.get(); + report(); + check_canary(); + } + + template + INLINE ap_private(const _private_bit_ref<_AP_W2, _AP_S2>& ref) { + set_canary(); + *this = ((uint64_t)(bool)ref); + report(); + check_canary(); + } + +// template +// INLINE ap_private(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { +// set_canary(); +// *this = ref.get(); +// report(); +// check_canary(); +// } +// +// template +// INLINE ap_private( +// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { +// set_canary(); +// *this = ((val.operator ap_private<_AP_W2, false>())); +// report(); +// check_canary(); +// } +// +// template +// INLINE ap_private( +// const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { +// set_canary(); +// *this = (uint64_t)(bool)val; +// report(); +// check_canary(); +// } + + /// Simply makes *this a copy of that. + /// @brief Copy Constructor. + INLINE ap_private(const ap_private& that) { + set_canary(); + memcpy(pVal, that.get_pVal(), _AP_N * APINT_WORD_SIZE); + clearUnusedBits(); + check_canary(); + } + + template + INLINE ap_private(const ap_private<_AP_W1, _AP_S1, false>& that) { + set_canary(); + operator=(that); + check_canary(); + } + + template + INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, false>& that) { + set_canary(); + operator=(const_cast&>(that)); + check_canary(); + } + + template + INLINE ap_private(const ap_private<_AP_W1, _AP_S1, true>& that) { + set_canary(); + static const uint64_t that_sign_ext_mask = + (_AP_W1 == APINT_BITS_PER_WORD) + ? 
0 + : ~0ULL >> (_AP_W1 % APINT_BITS_PER_WORD) + << (_AP_W1 % APINT_BITS_PER_WORD); + if (that.isNegative()) { + pVal[0] = that.get_VAL() | that_sign_ext_mask; + memset(pVal + 1, ~0, sizeof(uint64_t) * (_AP_N - 1)); + } else { + pVal[0] = that.get_VAL(); + memset(pVal + 1, 0, sizeof(uint64_t) * (_AP_N - 1)); + } + clearUnusedBits(); + check_canary(); + } + + template + INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, true>& that) { + set_canary(); + operator=(const_cast&>(that)); + check_canary(); + } + + /// @brief Destructor. + // virtual ~ap_private() {} + INLINE ~ap_private() { check_canary(); } + + /// @name Constructors + /// @{ + + /// Default constructor that creates an uninitialized ap_private. This is + /// useful + /// for object deserialization (pair this with the static method Read). + INLINE ap_private() { + set_canary(); + clearUnusedBits(); + check_canary(); + } + + INLINE ap_private(uint64_t* val, uint32_t bits = _AP_W) { assert(0); } + INLINE ap_private(const uint64_t* const val, uint32_t bits) { assert(0); } + +/// If isSigned is true then val is treated as if it were a signed value +/// (i.e. as an int64_t) and the appropriate sign extension to the bit width +/// will be done. Otherwise, no sign extension occurs (high order bits beyond +/// the range of val are zero filled). +/// @param numBits the bit width of the constructed ap_private +/// @param val the initial value of the ap_private +/// @param isSigned how to treat signedness of val +/// @brief Create a new ap_private of numBits width, initialized as val. 
+#define CTOR(TYPE, SIGNED) \ + INLINE ap_private(TYPE val, bool isSigned = SIGNED) { \ + set_canary(); \ + pVal[0] = (ValType)val; \ + if (isSigned && int64_t(pVal[0]) < 0) { \ + memset(pVal + 1, ~0, sizeof(uint64_t) * (_AP_N - 1)); \ + } else { \ + memset(pVal + 1, 0, sizeof(uint64_t) * (_AP_N - 1)); \ + } \ + clearUnusedBits(); \ + check_canary(); \ + } + + CTOR(bool, false) + CTOR(char, CHAR_IS_SIGNED) + CTOR(signed char, true) + CTOR(unsigned char, false) + CTOR(short, true) + CTOR(unsigned short, false) + CTOR(int, true) + CTOR(unsigned int, false) + CTOR(long, true) + CTOR(unsigned long, false) + CTOR(ap_slong, true) + CTOR(ap_ulong, false) +#if 0 + CTOR(half, false) + CTOR(float, false) + CTOR(double, false) +#endif +#undef CTOR + + /// @returns true if the number of bits <= 64, false otherwise. + /// @brief Determine if this ap_private just has one word to store value. + INLINE bool isSingleWord() const { return false; } + + /// @returns the word position for the specified bit position. + /// @brief Determine which word a bit is in. + static INLINE uint32_t whichWord(uint32_t bitPosition) { + // return bitPosition / APINT_BITS_PER_WORD; + return (bitPosition) >> 6; + } + + /// @returns the bit position in a word for the specified bit position + /// in the ap_private. + /// @brief Determine which bit in a word a bit is in. + static INLINE uint32_t whichBit(uint32_t bitPosition) { + // return bitPosition % APINT_BITS_PER_WORD; + return bitPosition & 0x3f; + } + + /// bit at a specific bit position. This is used to mask the bit in the + /// corresponding word. + /// @returns a uint64_t with only bit at "whichBit(bitPosition)" set + /// @brief Get a single bit mask. + static INLINE uint64_t maskBit(uint32_t bitPosition) { + return 1ULL << (whichBit(bitPosition)); + } + + /// @returns the corresponding word for the specified bit position. 
+ /// @brief Get the word corresponding to a bit position + INLINE uint64_t getWord(uint32_t bitPosition) const { + return pVal[whichWord(bitPosition)]; + } + + /// This method is used internally to clear the to "N" bits in the high order + /// word that are not used by the ap_private. This is needed after the most + /// significant word is assigned a value to ensure that those bits are + /// zero'd out. + /// @brief Clear unused high order bits + INLINE void clearUnusedBits(void) volatile +// just for clang compiler +#if defined(__clang__) && !defined(__CLANG_3_1__) + __attribute__((no_sanitize("undefined"))) +#endif + { + pVal[_AP_N - 1] = + _AP_S ? ((((int64_t)pVal[_AP_N - 1]) << (excess_bits)) >> excess_bits) + : (excess_bits + ? ((pVal[_AP_N - 1]) << (excess_bits)) >> (excess_bits) + : pVal[_AP_N - 1]); + } + + INLINE void clearUnusedBitsToZero(void) { pVal[_AP_N - 1] &= mask; } + + INLINE void clearUnusedBitsToOne(void) { pVal[_AP_N - 1] |= mask; } + + /// This is used by the constructors that take string arguments. 
+ /// @brief Convert a char array into an ap_private + INLINE void fromString(const char* str, uint32_t slen, uint8_t radix) { + enum { numbits = _AP_W }; + bool isNeg = str[0] == '-'; + if (isNeg) { + str++; + slen--; + } + + if (str[0] == '0' && (str[1] == 'b' || str[1] == 'B')) { + //if(radix == 0) radix = 2; + _AP_WARNING(radix != 2, "%s seems to have base %d, but %d given.", str, 2, radix); + str += 2; + slen -=2; + } else if (str[0] == '0' && (str[1] == 'o' || str[1] == 'O')) { + //if (radix == 0) radix = 8; + _AP_WARNING(radix != 8, "%s seems to have base %d, but %d given.", str, 8, radix); + str += 2; + slen -=2; + } else if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) { + //if (radix == 0) radix = 16; + _AP_WARNING(radix != 16, "%s seems to have base %d, but %d given.", str, 16, radix); + str += 2; + slen -=2; + } else if (str[0] == '0' && (str[1] == 'd' || str[1] == 'D')) { + //if (radix == 0) radix = 10; + _AP_WARNING(radix != 10, "%s seems to have base %d, but %d given.", str, 10, radix); + str += 2; + slen -=2; + } else if (radix == 0) { + //radix = 2; // XXX default value + } + + // Check our assumptions here + assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && + "Radix should be 2, 8, 10, or 16!"); + assert(str && "String is null?"); + + // skip any leading zero + while (*str == '0' && *(str + 1) != '\0') { + str++; + slen--; + } + assert((slen <= numbits || radix != 2) && "Insufficient bit width"); + assert(((slen - 1) * 3 <= numbits || radix != 8) && + "Insufficient bit width"); + assert(((slen - 1) * 4 <= numbits || radix != 16) && + "Insufficient bit width"); + assert((((slen - 1) * 64) / 22 <= numbits || radix != 10) && + "Insufficient bit width"); + + // clear bits + memset(pVal, 0, _AP_N * sizeof(uint64_t)); + + // Figure out if we can shift instead of multiply + uint32_t shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 
1 : 0); + + // Set up an ap_private for the digit to add outside the loop so we don't + // constantly construct/destruct it. + uint64_t bigVal[_AP_N]; + memset(bigVal, 0, _AP_N * sizeof(uint64_t)); + ap_private<_AP_W, _AP_S> apdigit(getBitWidth(), bigVal); + ap_private<_AP_W, _AP_S> apradix(radix); + + // Enter digit traversal loop + for (unsigned i = 0; i < slen; i++) { + // Get a digit + uint32_t digit = 0; + char cdigit = str[i]; + if (radix == 16) { +#define isxdigit(c) \ + (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || \ + ((c) >= 'A' && (c) <= 'F')) +#define isdigit(c) ((c) >= '0' && (c) <= '9') + if (!isxdigit(cdigit)) assert(0 && "Invalid hex digit in string"); + if (isdigit(cdigit)) + digit = cdigit - '0'; + else if (cdigit >= 'a') + digit = cdigit - 'a' + 10; + else if (cdigit >= 'A') + digit = cdigit - 'A' + 10; + else + assert(0 && "huh? we shouldn't get here"); + } else if (isdigit(cdigit)) { + digit = cdigit - '0'; + } else if (cdigit != '\0') { + assert(0 && "Invalid character in digit string"); + } +#undef isxdigit +#undef isdigit + // Shift or multiply the value by the radix + if (shift) + *this <<= shift; + else + *this *= apradix; + + // Add in the digit we just interpreted + apdigit.set_VAL(digit); + *this += apdigit; + } + // If its negative, put it in two's complement form + if (isNeg) { + (*this)--; + this->flip(); + } + clearUnusedBits(); + } + + INLINE ap_private read() volatile { return *this; } + + INLINE void write(const ap_private& op2) volatile { *this = (op2); } + + INLINE operator ValType() const { return get_VAL(); } + + INLINE int to_uchar() const { return (unsigned char)get_VAL(); } + + INLINE int to_char() const { return (signed char)get_VAL(); } + + INLINE int to_ushort() const { return (unsigned short)get_VAL(); } + + INLINE int to_short() const { return (short)get_VAL(); } + + INLINE int to_int() const { return (int)get_VAL(); } + + INLINE unsigned to_uint() const { return (unsigned)get_VAL(); } + + INLINE long 
to_long() const { return (long)get_VAL(); } + + INLINE unsigned long to_ulong() const { return (unsigned long)get_VAL(); } + + INLINE ap_slong to_int64() const { return (ap_slong)get_VAL(); } + + INLINE ap_ulong to_uint64() const { return (ap_ulong)get_VAL(); } + + INLINE double to_double() const { + if (isNegative()) + return roundToDouble(true); + else + return roundToDouble(false); + } + + INLINE unsigned length() const { return _AP_W; } + + /*Reverse the contents of ap_private instance. I.e. LSB becomes MSB and vise + * versa*/ + INLINE ap_private& reverse() { + for (int i = 0; i < _AP_W / 2; ++i) { + bool tmp = operator[](i); + if (operator[](_AP_W - 1 - i)) + set(i); + else + clear(i); + if (tmp) + set(_AP_W - 1 - i); + else + clear(_AP_W - 1 - i); + } + clearUnusedBits(); + return *this; + } + + /*Return true if the value of ap_private instance is zero*/ + INLINE bool iszero() const { return isMinValue(); } + + INLINE bool to_bool() const { return !iszero(); } + + /* x < 0 */ + INLINE bool sign() const { + if (isNegative()) return true; + return false; + } + + /* x[i] = !x[i] */ + INLINE void invert(int i) { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + flip(i); + } + + /* x[i] */ + INLINE bool test(int i) const { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + return operator[](i); + } + + // Set the ith bit into v + INLINE void set(int i, bool v) { + assert(i >= 0 && "Attempting to write bit with negative index"); + assert(i < _AP_W && "Attempting to write bit beyond MSB"); + v ? set(i) : clear(i); + } + + // Set the ith bit into v + INLINE void set_bit(int i, bool v) { + assert(i >= 0 && "Attempting to write bit with negative index"); + assert(i < _AP_W && "Attempting to write bit beyond MSB"); + v ? set(i) : clear(i); + } + + // FIXME different argument for different action? 
+ INLINE ap_private& set(uint32_t bitPosition) { + pVal[whichWord(bitPosition)] |= maskBit(bitPosition); + clearUnusedBits(); + return *this; + } + + INLINE void set() { + for (int i = 0; i < _AP_N; ++i) pVal[i] = ~0ULL; + clearUnusedBits(); + } + + // Get the value of ith bit + INLINE bool get(int i) const { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + return ((maskBit(i) & (pVal[whichWord(i)])) != 0); + } + + // Get the value of ith bit + INLINE bool get_bit(int i) const { + assert(i >= 0 && "Attempting to read bit with negative index"); + assert(i < _AP_W && "Attempting to read bit beyond MSB"); + return ((maskBit(i) & (pVal[whichWord(i)])) != 0); + } + + // This is used for sc_lv and sc_bv, which is implemented by sc_uint + // Rotate an ap_private object n places to the left + INLINE void lrotate(int n) { + assert(n >= 0 && "Attempting to shift negative index"); + assert(n < _AP_W && "Shift value larger than bit width"); + operator=(shl(n) | lshr(_AP_W - n)); + } + + // This is used for sc_lv and sc_bv, which is implemented by sc_uint + // Rotate an ap_private object n places to the right + INLINE void rrotate(int n) { + assert(n >= 0 && "Attempting to shift negative index"); + assert(n < _AP_W && "Shift value larger than bit width"); + operator=(lshr(n) | shl(_AP_W - n)); + } + + /// Set the given bit to 0 whose position is given as "bitPosition". + /// @brief Set a given bit to 0. + INLINE ap_private& clear(uint32_t bitPosition) { + pVal[whichWord(bitPosition)] &= ~maskBit(bitPosition); + clearUnusedBits(); + return *this; + } + + /// @brief Set every bit to 0. + INLINE void clear() { memset(pVal, 0, _AP_N * APINT_WORD_SIZE); } + + /// @brief Toggle every bit to its opposite value. + ap_private& flip() { + for (int i = 0; i < _AP_N; ++i) pVal[i] ^= ~0ULL; + clearUnusedBits(); + return *this; + } + + /// @brief Toggles a given bit to its opposite value. 
+ INLINE ap_private& flip(uint32_t bitPosition) { + assert(bitPosition < BitWidth && "Out of the bit-width range!"); + set_bit(bitPosition, !get_bit(bitPosition)); + return *this; + } + + // complements every bit + INLINE void b_not() { flip(); } + + INLINE ap_private getLoBits(uint32_t numBits) const { + return ap_private_ops::lshr(ap_private_ops::shl(*this, _AP_W - numBits), + _AP_W - numBits); + } + + INLINE ap_private getHiBits(uint32_t numBits) const { + return ap_private_ops::lshr(*this, _AP_W - numBits); + } + + // Binary Arithmetic + //----------------------------------------------------------- + +// template +// INLINE ap_private operator&( +// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { +// return *this & a2.get(); +// } +// +// template +// INLINE ap_private operator|( +// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { +// return *this | a2.get(); +// } +// +// template +// INLINE ap_private operator^( +// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { +// return *this ^ a2.get(); +// } + +/// Arithmetic assign +//------------------------------------------------------------- + +#define OP_BIN_LOGIC_ASSIGN_AP(Sym) \ + template \ + INLINE ap_private& operator Sym(const ap_private<_AP_W1, _AP_S1>& RHS) { \ + const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; \ + uint32_t numWords = AESL_std::min((int)_AP_N, _AP_N1); \ + uint32_t i; \ + if (_AP_W != _AP_W1) \ + fprintf(stderr, \ + "Warning! Bitsize mismach for ap_[u]int " #Sym " ap_[u]int.\n"); \ + for (i = 0; i < numWords; ++i) pVal[i] Sym RHS.get_pVal(i); \ + if (_AP_N1 < _AP_N) { \ + uint64_t ext = RHS.isNegative() ? ~0ULL : 0; \ + for (; i < _AP_N; i++) pVal[i] Sym ext; \ + } \ + clearUnusedBits(); \ + return *this; \ + } + + OP_BIN_LOGIC_ASSIGN_AP(&=); + OP_BIN_LOGIC_ASSIGN_AP(|=); + OP_BIN_LOGIC_ASSIGN_AP(^=); +#undef OP_BIN_LOGIC_ASSIGN_AP + + /// Adds the RHS APint to this ap_private. + /// @returns this, after addition of RHS. 
+ /// @brief Addition assignment operator. + template + INLINE ap_private& operator+=(const ap_private<_AP_W1, _AP_S1>& RHS) { + const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; + uint64_t RHSpVal[_AP_N1]; + for (int i = 0; i < _AP_N1; ++i) RHSpVal[i] = RHS.get_pVal(i); + ap_private_ops::add(pVal, pVal, RHSpVal, _AP_N, _AP_N, _AP_N1, _AP_S, + _AP_S1); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator-=(const ap_private<_AP_W1, _AP_S1>& RHS) { + const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; + uint64_t RHSpVal[_AP_N1]; + for (int i = 0; i < _AP_N1; ++i) RHSpVal[i] = RHS.get_pVal(i); + ap_private_ops::sub(pVal, pVal, RHSpVal, _AP_N, _AP_N, _AP_N1, _AP_S, + _AP_S1); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator*=(const ap_private<_AP_W1, _AP_S1>& RHS) { + // Get some bit facts about LHS and check for zero + uint32_t lhsBits = getActiveBits(); + uint32_t lhsWords = !lhsBits ? 0 : whichWord(lhsBits - 1) + 1; + if (!lhsWords) { + // 0 * X ===> 0 + return *this; + } + + ap_private dupRHS = RHS; + // Get some bit facts about RHS and check for zero + uint32_t rhsBits = dupRHS.getActiveBits(); + uint32_t rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1; + if (!rhsWords) { + // X * 0 ===> 0 + clear(); + return *this; + } + + // Allocate space for the result + uint32_t destWords = rhsWords + lhsWords; + uint64_t* dest = (uint64_t*)malloc(destWords * sizeof(uint64_t)); + + // Perform the long multiply + ap_private_ops::mul(dest, pVal, lhsWords, dupRHS.get_pVal(), rhsWords, + destWords); + + // Copy result back into *this + clear(); + uint32_t wordsToCopy = destWords >= _AP_N ? _AP_N : destWords; + + memcpy(pVal, dest, wordsToCopy * APINT_WORD_SIZE); + + uint64_t ext = (isNegative() ^ RHS.isNegative()) ? 
~0ULL : 0ULL; + for (int i = wordsToCopy; i < _AP_N; i++) pVal[i] = ext; + clearUnusedBits(); + // delete dest array and return + free(dest); + return *this; + } + +#define OP_ASSIGN_AP(Sym) \ + template \ + INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ + *this = operator Sym(op); \ + return *this; \ + } + + OP_ASSIGN_AP(/) + OP_ASSIGN_AP(%) +#undef OP_ASSIGN_AP + +#define OP_BIN_LOGIC_AP(Sym) \ + template \ + INLINE typename RType<_AP_W1, _AP_S1>::logic operator Sym( \ + const ap_private<_AP_W1, _AP_S1>& RHS) const { \ + enum { \ + numWords = (RType<_AP_W1, _AP_S1>::logic_w + APINT_BITS_PER_WORD - 1) / \ + APINT_BITS_PER_WORD \ + }; \ + typename RType<_AP_W1, _AP_S1>::logic Result; \ + uint32_t i; \ + const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; \ + uint32_t min_N = std::min((int)_AP_N, _AP_N1); \ + uint32_t max_N = std::max((int)_AP_N, _AP_N1); \ + for (i = 0; i < min_N; ++i) \ + Result.set_pVal(i, pVal[i] Sym RHS.get_pVal(i)); \ + if (numWords > i) { \ + uint64_t ext = ((_AP_N < _AP_N1 && isNegative()) || \ + (_AP_N1 < _AP_N && RHS.isNegative())) \ + ? ~0ULL \ + : 0; \ + if (_AP_N > _AP_N1) \ + for (; i < max_N; i++) Result.set_pVal(i, pVal[i] Sym ext); \ + else \ + for (; i < max_N; i++) Result.set_pVal(i, RHS.get_pVal(i) Sym ext); \ + if (numWords > i) { \ + uint64_t ext2 = ((_AP_N > _AP_N1 && isNegative()) || \ + (_AP_N1 > _AP_N && RHS.isNegative())) \ + ? 
~0ULL \ + : 0; \ + Result.set_pVal(i, ext Sym ext2); \ + } \ + } \ + Result.clearUnusedBits(); \ + return Result; \ + } + + OP_BIN_LOGIC_AP(|); + OP_BIN_LOGIC_AP(&); + OP_BIN_LOGIC_AP(^); + +#undef OP_BIN_LOGIC_AP + + template + INLINE typename RType<_AP_W1, _AP_S1>::plus operator+( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + typename RType<_AP_W1, _AP_S1>::plus Result, lhs(*this), rhs(RHS); + const int Result_AP_N = (RType<_AP_W1, _AP_S1>::plus_w + 63) / 64; + ap_private_ops::add(Result.get_pVal(), lhs.get_pVal(), rhs.get_pVal(), + Result_AP_N, Result_AP_N, Result_AP_N, _AP_S, _AP_S1); + Result.clearUnusedBits(); + return Result; + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::minus operator-( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + typename RType<_AP_W1, _AP_S1>::minus Result, lhs(*this), rhs(RHS); + const int Result_AP_N = (RType<_AP_W1, _AP_S1>::minus_w + 63) / 64; + ap_private_ops::sub(Result.get_pVal(), lhs.get_pVal(), rhs.get_pVal(), + Result_AP_N, Result_AP_N, Result_AP_N, _AP_S, _AP_S1); + Result.clearUnusedBits(); + return Result; + } + + template + INLINE typename RType<_AP_W1, _AP_S1>::mult operator*( + const ap_private<_AP_W1, _AP_S1>& RHS) const { + typename RType<_AP_W1, _AP_S1>::mult temp = *this; + temp *= RHS; + return temp; + } + + template + INLINE typename RType<_AP_W2, _AP_S2>::div operator/( + const ap_private<_AP_W2, _AP_S2>& op) const { + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> + lhs = *this; + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> + rhs = op; + return typename RType<_AP_W2, _AP_S2>::div( + (_AP_S || _AP_S2) ? lhs.sdiv(rhs) : lhs.udiv(rhs)); + } + + template + INLINE typename RType<_AP_W2, _AP_S2>::mod operator%( + const ap_private<_AP_W2, _AP_S2>& op) const { + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> + lhs = *this; + ap_private _AP_W2 ? _AP_S + : (_AP_W2 > _AP_W ? 
_AP_S2 : _AP_S || _AP_S2))> + rhs = op; + typename RType<_AP_W2, _AP_S2>::mod res = + typename RType<_AP_W2, _AP_S2>::mod(_AP_S ? lhs.srem(rhs) + : lhs.urem(rhs)); + return res; + } + +#define OP_LEFT_SHIFT_CTYPE(TYPE, SIGNED) \ + INLINE ap_private operator<<(const TYPE op) const { \ + if (op >= _AP_W) return ap_private(0); \ + if (SIGNED && op < 0) return *this >> (0 - op); \ + return shl(op); \ + } + + OP_LEFT_SHIFT_CTYPE(int, true) + // OP_LEFT_SHIFT_CTYPE(bool, false) + OP_LEFT_SHIFT_CTYPE(signed char, true) + OP_LEFT_SHIFT_CTYPE(unsigned char, false) + OP_LEFT_SHIFT_CTYPE(short, true) + OP_LEFT_SHIFT_CTYPE(unsigned short, false) + OP_LEFT_SHIFT_CTYPE(unsigned int, false) + OP_LEFT_SHIFT_CTYPE(long, true) + OP_LEFT_SHIFT_CTYPE(unsigned long, false) + OP_LEFT_SHIFT_CTYPE(unsigned long long, false) + OP_LEFT_SHIFT_CTYPE(long long, true) +#if 0 + OP_LEFT_SHIFT_CTYPE(half, false) + OP_LEFT_SHIFT_CTYPE(float, false) + OP_LEFT_SHIFT_CTYPE(double, false) +#endif +#undef OP_LEFT_SHIFT_CTYPE + + template + INLINE ap_private operator<<(const ap_private<_AP_W2, _AP_S2>& op2) const { + if (_AP_S2 == false) { + uint32_t sh = op2.to_uint(); + return *this << sh; + } else { + int sh = op2.to_int(); + return *this << sh; + } + } + +#define OP_RIGHT_SHIFT_CTYPE(TYPE, SIGNED) \ + INLINE ap_private operator>>(const TYPE op) const { \ + if (op >= _AP_W) { \ + if (isNegative()) \ + return ap_private(-1); \ + else \ + return ap_private(0); \ + } \ + if ((SIGNED) && op < 0) return *this << (0 - op); \ + if (_AP_S) \ + return ashr(op); \ + else \ + return lshr(op); \ + } + + // OP_RIGHT_SHIFT_CTYPE(bool, false) + OP_RIGHT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) + OP_RIGHT_SHIFT_CTYPE(signed char, true) + OP_RIGHT_SHIFT_CTYPE(unsigned char, false) + OP_RIGHT_SHIFT_CTYPE(short, true) + OP_RIGHT_SHIFT_CTYPE(unsigned short, false) + OP_RIGHT_SHIFT_CTYPE(int, true) + OP_RIGHT_SHIFT_CTYPE(unsigned int, false) + OP_RIGHT_SHIFT_CTYPE(long, true) + OP_RIGHT_SHIFT_CTYPE(unsigned long, false) + 
OP_RIGHT_SHIFT_CTYPE(unsigned long long, false) + OP_RIGHT_SHIFT_CTYPE(long long, true) +#if 0 + OP_RIGHT_SHIFT_CTYPE(half, false) + OP_RIGHT_SHIFT_CTYPE(float, false) + OP_RIGHT_SHIFT_CTYPE(double, false) +#endif +#undef OP_RIGHT_SHIFT_CTYPE + + template + INLINE ap_private operator>>(const ap_private<_AP_W2, _AP_S2>& op2) const { + if (_AP_S2 == false) { + uint32_t sh = op2.to_uint(); + return *this >> sh; + } else { + int sh = op2.to_int(); + return *this >> sh; + } + } + + /// Shift assign + //------------------------------------------------------------------ + // TODO call clearUnusedBits ? +#define OP_ASSIGN_AP(Sym) \ + template \ + INLINE ap_private& operator Sym##=(int op) { \ + *this = operator Sym(op); \ + return *this; \ + } \ + INLINE ap_private& operator Sym##=(unsigned int op) { \ + *this = operator Sym(op); \ + return *this; \ + } \ + template \ + INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ + *this = operator Sym(op); \ + return *this; \ + } + OP_ASSIGN_AP(>>) + OP_ASSIGN_AP(<<) +#undef OP_ASSIGN_AP + + /// Comparisons + //----------------------------------------------------------------- + INLINE bool operator==(const ap_private& RHS) const { + // Get some facts about the number of bits used in the two operands. + uint32_t n1 = getActiveBits(); + uint32_t n2 = RHS.getActiveBits(); + + // If the number of bits isn't the same, they aren't equal + if (n1 != n2) return false; + + // If the number of bits fits in a word, we only need to compare the low + // word. 
+ if (n1 <= APINT_BITS_PER_WORD) return pVal[0] == RHS.get_pVal(0); + + // Otherwise, compare everything + for (int i = whichWord(n1 - 1); i >= 0; --i) + if (pVal[i] != RHS.get_pVal(i)) return false; + return true; + } + + template + INLINE bool operator==(const ap_private<_AP_W2, _AP_S2>& op) const { + enum { + _AP_MAX_W = AP_MAX(_AP_W, _AP_W2), + }; + ap_private<_AP_MAX_W, false> lhs(*this); + ap_private<_AP_MAX_W, false> rhs(op); + return lhs == rhs; + } + + INLINE bool operator==(uint64_t Val) const { + uint32_t n = getActiveBits(); + if (n <= APINT_BITS_PER_WORD) + return pVal[0] == Val; + else + return false; + } + + template + INLINE bool operator!=(const ap_private<_AP_W2, _AP_S2>& op) const { + return !(*this == op); + } + + template + INLINE bool operator!=(const ap_private<_AP_W, _AP_S1>& RHS) const { + return !((*this) == RHS); + } + + INLINE bool operator!=(uint64_t Val) const { return !((*this) == Val); } + + template + INLINE bool operator<=(const ap_private<_AP_W2, _AP_S2>& op) const { + return !(*this > op); + } + + INLINE bool operator<(const ap_private& op) const { + return _AP_S ? slt(op) : ult(op); + } + + template + INLINE bool operator<(const ap_private<_AP_W2, _AP_S2>& op) const { + enum { + _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) + }; + ap_private<_AP_MAX_W, _AP_S> lhs(*this); + ap_private<_AP_MAX_W, _AP_S2> rhs(op); + if (_AP_S == _AP_S2) + return _AP_S ? lhs.slt(rhs) : lhs.ult(rhs); + else if (_AP_S) + if (_AP_W2 >= _AP_W) + return lhs.ult(rhs); + else + return lhs.slt(rhs); + else if (_AP_W >= _AP_W2) + return lhs.ult(rhs); + else + return lhs.slt(rhs); + } + + template + INLINE bool operator>=(const ap_private<_AP_W2, _AP_S2>& op) const { + return !(*this < op); + } + + INLINE bool operator>(const ap_private& op) const { + return _AP_S ? 
sgt(op) : ugt(op); + } + + template + INLINE bool operator>(const ap_private<_AP_W2, _AP_S2>& op) const { + enum { + _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) + }; + ap_private<_AP_MAX_W, _AP_S> lhs(*this); + ap_private<_AP_MAX_W, _AP_S2> rhs(op); + if (_AP_S == _AP_S2) + return _AP_S ? lhs.sgt(rhs) : lhs.ugt(rhs); + else if (_AP_S) + if (_AP_W2 >= _AP_W) + return lhs.ugt(rhs); + else + return lhs.sgt(rhs); + else if (_AP_W >= _AP_W2) + return lhs.ugt(rhs); + else + return lhs.sgt(rhs); + } + + /// Bit and Part Select + //-------------------------------------------------------------- + INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { + return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { + return _private_range_ref<_AP_W, _AP_S>( + const_cast*>(this), Hi, Lo); + } + + INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { + return _private_range_ref<_AP_W, _AP_S>( + (const_cast*>(this)), Hi, Lo); + } + + INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { + return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + template + INLINE _private_range_ref<_AP_W, _AP_S> range( + const ap_private<_AP_W2, _AP_S2>& HiIdx, + const ap_private<_AP_W3, _AP_S3>& LoIdx) { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + template + INLINE _private_range_ref<_AP_W, _AP_S> operator()( + const ap_private<_AP_W2, _AP_S2>& HiIdx, + const ap_private<_AP_W3, _AP_S3>& LoIdx) { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); + } + + template + INLINE _private_range_ref<_AP_W, _AP_S> range( + const ap_private<_AP_W2, _AP_S2>& HiIdx, + const ap_private<_AP_W3, _AP_S3>& LoIdx) const { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return _private_range_ref<_AP_W, _AP_S>(const_cast(this), 
Hi, Lo); + } + + template + INLINE _private_range_ref<_AP_W, _AP_S> operator()( + const ap_private<_AP_W2, _AP_S2>& HiIdx, + const ap_private<_AP_W3, _AP_S3>& LoIdx) const { + int Hi = HiIdx.to_int(); + int Lo = LoIdx.to_int(); + return this->range(Hi, Lo); + } + + INLINE _private_bit_ref<_AP_W, _AP_S> operator[](int index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index); + } + + template + INLINE _private_bit_ref<_AP_W, _AP_S> operator[]( + const ap_private<_AP_W2, _AP_S2>& index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); + } + + template + INLINE const _private_bit_ref<_AP_W, _AP_S> operator[]( + const ap_private<_AP_W2, _AP_S2>& index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index.to_int()); + } + + INLINE const _private_bit_ref<_AP_W, _AP_S> operator[](int index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index); + } + + INLINE _private_bit_ref<_AP_W, _AP_S> bit(int index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index); + } + + template + INLINE _private_bit_ref<_AP_W, _AP_S> bit(const ap_private<_AP_W2, _AP_S2>& index) { + return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); + } + + INLINE const _private_bit_ref<_AP_W, _AP_S> bit(int index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index); + } + + template + INLINE const _private_bit_ref<_AP_W, _AP_S> bit( + const ap_private<_AP_W2, _AP_S2>& index) const { + return _private_bit_ref<_AP_W, _AP_S>( + const_cast&>(*this), index.to_int()); + } + +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> > +// concat(ap_private<_AP_W2, _AP_S2>& a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> > +// concat(const ap_private<_AP_W2, _AP_S2>& a2) 
const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(ap_private<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// const_cast&>(*this), a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(const ap_private<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// *this, const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > +// operator,(const ap_private<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> > +// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> > +// operator,(_private_range_ref<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, 
ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> > +// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> >( +// const_cast&>(*this), +// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> > +// operator,(_private_bit_ref<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, +// _private_bit_ref<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > +// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( +// const_cast&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > +// operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { +// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, +// a2); +// } +// +// template +// INLINE ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> +// &a2) const { +// return ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// const_cast&>(*this), +// const_cast< +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> 
> +// operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { +// return ap_concat_ref< +// _AP_W, ap_private, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, +// a2); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> +// &a2) const { +// return ap_concat_ref< +// _AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// const_cast&>(*this), +// const_cast&>( +// a2)); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,( +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { +// return ap_concat_ref< +// _AP_W, ap_private, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); +// } + + INLINE ap_private<_AP_W, false> get() const { + ap_private<_AP_W, false> ret(*this); + return ret; + } + + template + INLINE void set(const ap_private<_AP_W3, false>& val) { + operator=(ap_private<_AP_W3, _AP_S>(val)); + } + + /// + /// @name Value Tests + /// + /// This tests the high bit of this ap_private to determine if it is set. + /// @returns true if this ap_private is negative, false otherwise + /// @brief Determine sign of this ap_private. + INLINE bool isNegative() const { + // just for get rid of warnings + enum { shift = (_AP_W - APINT_BITS_PER_WORD * (_AP_N - 1) - 1) }; + static const uint64_t mask = 1ULL << (shift); + return _AP_S && (pVal[_AP_N - 1] & mask); + } + + /// This tests the high bit of the ap_private to determine if it is unset. + /// @brief Determine if this ap_private Value is positive (not negative). + INLINE bool isPositive() const { return !isNegative(); } + + /// This tests if the value of this ap_private is strictly positive (> 0). 
+ /// @returns true if this ap_private is Positive and not zero. + /// @brief Determine if this ap_private Value is strictly positive. + INLINE bool isStrictlyPositive() const { + return isPositive() && (*this) != 0; + } + + /// This checks to see if the value has all bits of the ap_private are set or + /// not. + /// @brief Determine if all bits are set + INLINE bool isAllOnesValue() const { return countPopulation() == _AP_W; } + + /// This checks to see if the value of this ap_private is the maximum unsigned + /// value for the ap_private's bit width. + /// @brief Determine if this is the largest unsigned value. + INLINE bool isMaxValue() const { return countPopulation() == _AP_W; } + + /// This checks to see if the value of this ap_private is the maximum signed + /// value for the ap_private's bit width. + /// @brief Determine if this is the largest signed value. + INLINE bool isMaxSignedValue() const { + return !isNegative() && countPopulation() == _AP_W - 1; + } + + /// This checks to see if the value of this ap_private is the minimum unsigned + /// value for the ap_private's bit width. + /// @brief Determine if this is the smallest unsigned value. + INLINE bool isMinValue() const { return countPopulation() == 0; } + + /// This checks to see if the value of this ap_private is the minimum signed + /// value for the ap_private's bit width. + /// @brief Determine if this is the smallest signed value. + INLINE bool isMinSignedValue() const { + return isNegative() && countPopulation() == 1; + } + + /// This function returns a pointer to the internal storage of the ap_private. + /// This is useful for writing out the ap_private in binary form without any + /// conversions. + INLINE const uint64_t* getRawData() const { return &pVal[0]; } + + // Square Root - this method computes and returns the square root of "this". + // Three mechanisms are used for computation. For small values (<= 5 bits), + // a table lookup is done. This gets some performance for common cases. 
For + // values using less than 52 bits, the value is converted to double and then + // the libc sqrt function is called. The result is rounded and then converted + // back to a uint64_t which is then used to construct the result. Finally, + // the Babylonian method for computing square roots is used. + INLINE ap_private sqrt() const { + // Determine the magnitude of the value. + uint32_t magnitude = getActiveBits(); + + // Use a fast table for some small values. This also gets rid of some + // rounding errors in libc sqrt for small values. + if (magnitude <= 5) { + static const uint8_t results[32] = { + /* 0 */ 0, + /* 1- 2 */ 1, 1, + /* 3- 6 */ 2, 2, 2, 2, + /* 7-12 */ 3, 3, 3, 3, 3, 3, + /* 13-20 */ 4, 4, 4, 4, 4, 4, 4, 4, + /* 21-30 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 31 */ 6}; + return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ results[get_VAL()]); + } + + // If the magnitude of the value fits in less than 52 bits (the precision of + // an IEEE double precision floating point value), then we can use the + // libc sqrt function which will probably use a hardware sqrt computation. + // This should be faster than the algorithm below. + if (magnitude < 52) { +#ifdef _MSC_VER + // Amazingly, VC++ doesn't have round(). + return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ + uint64_t(::sqrt(double(get_VAL()))) + + 0.5); +#else + return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ + uint64_t( + ::round(::sqrt(double(get_VAL()))))); +#endif + } + + // Okay, all the short cuts are exhausted. We must compute it. The following + // is a classical Babylonian method for computing the square root. This code + // was adapted to APINt from a wikipedia article on such computations. + // See http://www.wikipedia.org/ and go to the page named + // Calculate_an_integer_square_root. 
+ uint32_t nbits = BitWidth, i = 4; + ap_private<_AP_W, _AP_S> testy(16); + ap_private<_AP_W, _AP_S> x_old(/*BitWidth,*/ 1); + ap_private<_AP_W, _AP_S> x_new(0); + ap_private<_AP_W, _AP_S> two(/*BitWidth,*/ 2); + + // Select a good starting value using binary logarithms. + for (;; i += 2, testy = testy.shl(2)) + if (i >= nbits || this->ule(testy)) { + x_old = x_old.shl(i / 2); + break; + } + + // Use the Babylonian method to arrive at the integer square root: + for (;;) { + x_new = (this->udiv(x_old) + x_old).udiv(two); + if (x_old.ule(x_new)) break; + x_old = x_new; + } + + // Make sure we return the closest approximation + // NOTE: The rounding calculation below is correct. It will produce an + // off-by-one discrepancy with results from pari/gp. That discrepancy has + // been + // determined to be a rounding issue with pari/gp as it begins to use a + // floating point representation after 192 bits. There are no discrepancies + // between this algorithm and pari/gp for bit widths < 192 bits. + ap_private<_AP_W, _AP_S> square(x_old * x_old); + ap_private<_AP_W, _AP_S> nextSquare((x_old + 1) * (x_old + 1)); + if (this->ult(square)) + return x_old; + else if (this->ule(nextSquare)) { + ap_private<_AP_W, _AP_S> midpoint((nextSquare - square).udiv(two)); + ap_private<_AP_W, _AP_S> offset(*this - square); + if (offset.ult(midpoint)) + return x_old; + else + return x_old + 1; + } else + assert(0 && "Error in ap_private<_AP_W, _AP_S>::sqrt computation"); + return x_old + 1; + } + + /// + /// @Assignment Operators + /// + /// @returns *this after assignment of RHS. + /// @brief Copy assignment operator. 
+ INLINE ap_private& operator=(const ap_private& RHS) { + if (this != &RHS) memcpy(pVal, RHS.get_pVal(), _AP_N * APINT_WORD_SIZE); + clearUnusedBits(); + return *this; + } + INLINE ap_private& operator=(const volatile ap_private& RHS) { + if (this != &RHS) + for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); + clearUnusedBits(); + return *this; + } + INLINE void operator=(const ap_private& RHS) volatile { + if (this != &RHS) + for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); + clearUnusedBits(); + } + INLINE void operator=(const volatile ap_private& RHS) volatile { + if (this != &RHS) + for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); + clearUnusedBits(); + } + + template + INLINE ap_private& operator=(const ap_private<_AP_W1, _AP_S1>& RHS) { + if (_AP_S1) + cpSextOrTrunc(RHS); + else + cpZextOrTrunc(RHS); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator=(const volatile ap_private<_AP_W1, _AP_S1>& RHS) { + if (_AP_S1) + cpSextOrTrunc(RHS); + else + cpZextOrTrunc(RHS); + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + *this = ap_private<_AP_W2, false>(op2); + return *this; + } + +#if 0 + template + INLINE ap_private& operator=(const ap_private<_AP_W1, _AP_S1, true>& RHS) { + static const uint64_t that_sign_ext_mask = (_AP_W1==APINT_BITS_PER_WORD)?0:~0ULL>>(_AP_W1%APINT_BITS_PER_WORD)<<(_AP_W1%APINT_BITS_PER_WORD); + if (RHS.isNegative()) { + pVal[0] = RHS.get_VAL() | that_sign_ext_mask; + memset(pVal+1,~0, APINT_WORD_SIZE*(_AP_N-1)); + } else { + pVal[0] = RHS.get_VAL(); + memset(pVal+1, 0, APINT_WORD_SIZE*(_AP_N-1)); + } + clearUnusedBits(); + return *this; + } + + template + INLINE ap_private& operator=(const volatile ap_private<_AP_W1, _AP_S1, true>& RHS) { + static const uint64_t that_sign_ext_mask = (_AP_W1==APINT_BITS_PER_WORD)?0:~0ULL>>(_AP_W1%APINT_BITS_PER_WORD)<<(_AP_W1%APINT_BITS_PER_WORD); + if 
(RHS.isNegative()) { + pVal[0] = RHS.get_VAL() | that_sign_ext_mask; + memset(pVal+1,~0, APINT_WORD_SIZE*(_AP_N-1)); + } else { + pVal[0] = RHS.get_VAL(); + memset(pVal+1, 0, APINT_WORD_SIZE*(_AP_N-1)); + } + clearUnusedBits(); + return *this; + } +#endif + +/// from all c types. +#define ASSIGN_OP_FROM_INT(C_TYPE, _AP_W2, _AP_S2) \ + INLINE ap_private& operator=(const C_TYPE rhs) { \ + ap_private<(_AP_W2), (_AP_S2)> tmp = rhs; \ + operator=(tmp); \ + return *this; \ + } + + ASSIGN_OP_FROM_INT(bool, 1, false) + ASSIGN_OP_FROM_INT(char, 8, CHAR_IS_SIGNED) + ASSIGN_OP_FROM_INT(signed char, 8, true) + ASSIGN_OP_FROM_INT(unsigned char, 8, false) + ASSIGN_OP_FROM_INT(short, sizeof(short) * 8, true) + ASSIGN_OP_FROM_INT(unsigned short, sizeof(unsigned short) * 8, false) + ASSIGN_OP_FROM_INT(int, sizeof(int) * 8, true) + ASSIGN_OP_FROM_INT(unsigned int, sizeof(unsigned int) * 8, false) + ASSIGN_OP_FROM_INT(long, sizeof(long) * 8, true) + ASSIGN_OP_FROM_INT(unsigned long, sizeof(unsigned long) * 8, false) + ASSIGN_OP_FROM_INT(ap_slong, sizeof(ap_slong) * 8, true) + ASSIGN_OP_FROM_INT(ap_ulong, sizeof(ap_ulong) * 8, false) +#undef ASSIGN_OP_FROM_INT + + /// from c string. + // XXX this is a must, to prevent pointer being converted to bool. + INLINE ap_private& operator=(const char* s) { + ap_private tmp(s); // XXX direct initialization, as ctor is explicit. + operator=(tmp); + return *this; + } + + /// + /// @name Unary Operators + /// + /// @returns a new ap_private value representing *this incremented by one + /// @brief Postfix increment operator. + INLINE const ap_private operator++(int) { + ap_private API(*this); + ++(*this); + return API; + } + + /// @returns *this incremented by one + /// @brief Prefix increment operator. + INLINE ap_private& operator++() { + ap_private_ops::add_1(pVal, pVal, _AP_N, 1); + clearUnusedBits(); + return *this; + } + + /// @returns a new ap_private representing *this decremented by one. + /// @brief Postfix decrement operator. 
+ INLINE const ap_private operator--(int) { + ap_private API(*this); + --(*this); + return API; + } + + /// @returns *this decremented by one. + /// @brief Prefix decrement operator. + INLINE ap_private& operator--() { + ap_private_ops::sub_1(pVal, _AP_N, 1); + clearUnusedBits(); + return *this; + } + + /// Performs a bitwise complement operation on this ap_private. + /// @returns an ap_private that is the bitwise complement of *this + /// @brief Unary bitwise complement operator. + INLINE ap_private<_AP_W + !_AP_S, true> operator~() const { + ap_private<_AP_W + !_AP_S, true> Result(*this); + Result.flip(); + return Result; + } + + /// Negates *this using two's complement logic. + /// @returns An ap_private value representing the negation of *this. + /// @brief Unary negation operator + INLINE typename RType<1, false>::minus operator-() const { + return ap_private<1, false>(0) - (*this); + } + + /// Performs logical negation operation on this ap_private. + /// @returns true if *this is zero, false otherwise. + /// @brief Logical negation operator. 
+ INLINE bool operator!() const { + for (int i = 0; i < _AP_N; ++i) + if (pVal[i]) return false; + return true; + } + + template + INLINE ap_private<_AP_W, _AP_S || _AP_S1> And( + const ap_private<_AP_W, _AP_S1>& RHS) const { + return this->operator&(RHS); + } + template + INLINE ap_private Or(const ap_private<_AP_W, _AP_S1>& RHS) const { + return this->operator|(RHS); + } + template + INLINE ap_private Xor(const ap_private<_AP_W, _AP_S1>& RHS) const { + return this->operator^(RHS); + } + + INLINE ap_private Mul(const ap_private& RHS) const { + ap_private Result(*this); + Result *= RHS; + return Result; + } + + INLINE ap_private Add(const ap_private& RHS) const { + ap_private Result(0); + ap_private_ops::add(Result.get_pVal(), pVal, RHS.get_pVal(), _AP_N, _AP_N, + _AP_N, _AP_S, _AP_S); + Result.clearUnusedBits(); + return Result; + } + + INLINE ap_private Sub(const ap_private& RHS) const { + ap_private Result(0); + ap_private_ops::sub(Result.get_pVal(), pVal, RHS.get_pVal(), _AP_N, _AP_N, + _AP_N, _AP_S, _AP_S); + Result.clearUnusedBits(); + return Result; + } + + /// Arithmetic right-shift this ap_private by shiftAmt. + /// @brief Arithmetic right-shift function. + INLINE ap_private ashr(uint32_t shiftAmt) const { + assert(shiftAmt <= BitWidth && "Invalid shift amount, too big"); + // Handle a degenerate case + if (shiftAmt == 0) return ap_private(*this); + + // If all the bits were shifted out, the result is, technically, undefined. + // We return -1 if it was negative, 0 otherwise. We check this early to + // avoid + // issues in the algorithm below. + if (shiftAmt == BitWidth) { + if (isNegative()) + return ap_private(-1); + else + return ap_private(0); + } + + // Create some space for the result. 
+ ap_private Retval(0); + uint64_t* val = Retval.get_pVal(); + + // Compute some values needed by the following shift algorithms + uint32_t wordShift = + shiftAmt % APINT_BITS_PER_WORD; // bits to shift per word + uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; // word offset for shift + uint32_t breakWord = _AP_N - 1 - offset; // last word affected + uint32_t bitsInWord = whichBit(BitWidth); // how many bits in last word? + if (bitsInWord == 0) bitsInWord = APINT_BITS_PER_WORD; + + // If we are shifting whole words, just move whole words + if (wordShift == 0) { + // Move the words containing significant bits + for (uint32_t i = 0; i <= breakWord; ++i) + val[i] = pVal[i + offset]; // move whole word + + // Adjust the top significant word for sign bit fill, if negative + if (isNegative()) + if (bitsInWord < APINT_BITS_PER_WORD) + val[breakWord] |= ~0ULL << (bitsInWord); // set high bits + } else { + // Shift the low order words + for (uint32_t i = 0; i < breakWord; ++i) { + // This combines the shifted corresponding word with the low bits from + // the next word (shifted into this word's high bits). + val[i] = ((pVal[i + offset]) >> (wordShift)); + val[i] |= ((pVal[i + offset + 1]) << (APINT_BITS_PER_WORD - wordShift)); + } + + // Shift the break word. In this case there are no bits from the next word + // to include in this word. + val[breakWord] = (pVal[breakWord + offset]) >> (wordShift); + + // Deal with sign extenstion in the break word, and possibly the word + // before + // it. + if (isNegative()) { + if (wordShift > bitsInWord) { + if (breakWord > 0) + val[breakWord - 1] |= + ~0ULL << (APINT_BITS_PER_WORD - (wordShift - bitsInWord)); + val[breakWord] |= ~0ULL; + } else + val[breakWord] |= (~0ULL << (bitsInWord - wordShift)); + } + } + + // Remaining words are 0 or -1, just assign them. + uint64_t fillValue = (isNegative() ? 
~0ULL : 0); + for (int i = breakWord + 1; i < _AP_N; ++i) val[i] = fillValue; + Retval.clearUnusedBits(); + return Retval; + } + + /// Logical right-shift this ap_private by shiftAmt. + /// @brief Logical right-shift function. + INLINE ap_private lshr(uint32_t shiftAmt) const { + // If all the bits were shifted out, the result is 0. This avoids issues + // with shifting by the size of the integer type, which produces undefined + // results. We define these "undefined results" to always be 0. + if (shiftAmt == BitWidth) return ap_private(0); + + // If none of the bits are shifted out, the result is *this. This avoids + // issues with shifting byt he size of the integer type, which produces + // undefined results in the code below. This is also an optimization. + if (shiftAmt == 0) return ap_private(*this); + + // Create some space for the result. + ap_private Retval(0); + uint64_t* val = Retval.get_pVal(); + + // If we are shifting less than a word, compute the shift with a simple + // carry + if (shiftAmt < APINT_BITS_PER_WORD) { + uint64_t carry = 0; + for (int i = _AP_N - 1; i >= 0; --i) { + val[i] = ((pVal[i]) >> (shiftAmt)) | carry; + carry = (pVal[i]) << (APINT_BITS_PER_WORD - shiftAmt); + } + Retval.clearUnusedBits(); + return Retval; + } + + // Compute some values needed by the remaining shift algorithms + uint32_t wordShift = shiftAmt % APINT_BITS_PER_WORD; + uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; + + // If we are shifting whole words, just move whole words + if (wordShift == 0) { + for (uint32_t i = 0; i < _AP_N - offset; ++i) val[i] = pVal[i + offset]; + for (uint32_t i = _AP_N - offset; i < _AP_N; i++) val[i] = 0; + Retval.clearUnusedBits(); + return Retval; + } + + // Shift the low order words + uint32_t breakWord = _AP_N - offset - 1; + for (uint32_t i = 0; i < breakWord; ++i) + val[i] = ((pVal[i + offset]) >> (wordShift)) | + ((pVal[i + offset + 1]) << (APINT_BITS_PER_WORD - wordShift)); + // Shift the break word. 
+ val[breakWord] = (pVal[breakWord + offset]) >> (wordShift); + + // Remaining words are 0 + for (int i = breakWord + 1; i < _AP_N; ++i) val[i] = 0; + Retval.clearUnusedBits(); + return Retval; + } + + /// Left-shift this ap_private by shiftAmt. + /// @brief Left-shift function. + INLINE ap_private shl(uint32_t shiftAmt) const { + assert(shiftAmt <= BitWidth && "Invalid shift amount, too big"); + // If all the bits were shifted out, the result is 0. This avoids issues + // with shifting by the size of the integer type, which produces undefined + // results. We define these "undefined results" to always be 0. + if (shiftAmt == BitWidth) return ap_private(0); + + // If none of the bits are shifted out, the result is *this. This avoids a + // lshr by the words size in the loop below which can produce incorrect + // results. It also avoids the expensive computation below for a common + // case. + if (shiftAmt == 0) return ap_private(*this); + + // Create some space for the result. + ap_private Retval(0); + uint64_t* val = Retval.get_pVal(); + // If we are shifting less than a word, do it the easy way + if (shiftAmt < APINT_BITS_PER_WORD) { + uint64_t carry = 0; + for (int i = 0; i < _AP_N; i++) { + val[i] = ((pVal[i]) << (shiftAmt)) | carry; + carry = (pVal[i]) >> (APINT_BITS_PER_WORD - shiftAmt); + } + Retval.clearUnusedBits(); + return Retval; + } + + // Compute some values needed by the remaining shift algorithms + uint32_t wordShift = shiftAmt % APINT_BITS_PER_WORD; + uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; + + // If we are shifting whole words, just move whole words + if (wordShift == 0) { + for (uint32_t i = 0; i < offset; i++) val[i] = 0; + for (int i = offset; i < _AP_N; i++) val[i] = pVal[i - offset]; + Retval.clearUnusedBits(); + return Retval; + } + + // Copy whole words from this to Result. 
+ uint32_t i = _AP_N - 1; + for (; i > offset; --i) + val[i] = (pVal[i - offset]) << (wordShift) | + (pVal[i - offset - 1]) >> (APINT_BITS_PER_WORD - wordShift); + val[offset] = (pVal[0]) << (wordShift); + for (i = 0; i < offset; ++i) val[i] = 0; + Retval.clearUnusedBits(); + return Retval; + } + + INLINE ap_private rotl(uint32_t rotateAmt) const { + if (rotateAmt == 0) return ap_private(*this); + // Don't get too fancy, just use existing shift/or facilities + ap_private hi(*this); + ap_private lo(*this); + hi.shl(rotateAmt); + lo.lshr(BitWidth - rotateAmt); + return hi | lo; + } + + INLINE ap_private rotr(uint32_t rotateAmt) const { + if (rotateAmt == 0) return ap_private(*this); + // Don't get too fancy, just use existing shift/or facilities + ap_private hi(*this); + ap_private lo(*this); + lo.lshr(rotateAmt); + hi.shl(BitWidth - rotateAmt); + return hi | lo; + } + + /// Perform an unsigned divide operation on this ap_private by RHS. Both this + /// and + /// RHS are treated as unsigned quantities for purposes of this division. + /// @returns a new ap_private value containing the division result + /// @brief Unsigned division operation. + INLINE ap_private udiv(const ap_private& RHS) const { + // Get some facts about the LHS and RHS number of bits and words + uint32_t rhsBits = RHS.getActiveBits(); + uint32_t rhsWords = !rhsBits ? 0 : (whichWord(rhsBits - 1) + 1); + assert(rhsWords && "Divided by zero???"); + uint32_t lhsBits = this->getActiveBits(); + uint32_t lhsWords = !lhsBits ? 
0 : (whichWord(lhsBits - 1) + 1); + + // Deal with some degenerate cases + if (!lhsWords) + // 0 / X ===> 0 + return ap_private(0); + else if (lhsWords < rhsWords || this->ult(RHS)) { + // X / Y ===> 0, iff X < Y + return ap_private(0); + } else if (*this == RHS) { + // X / X ===> 1 + return ap_private(1); + } else if (lhsWords == 1 && rhsWords == 1) { + // All high words are zero, just use native divide + return ap_private(this->pVal[0] / RHS.get_pVal(0)); + } + + // We have to compute it the hard way. Invoke the Knuth divide algorithm. + ap_private Quotient(0); // to hold result. + ap_private_ops::divide(*this, lhsWords, RHS, rhsWords, &Quotient, + (ap_private*)0); + return Quotient; + } + + /// Signed divide this ap_private by ap_private RHS. + /// @brief Signed division function for ap_private. + INLINE ap_private sdiv(const ap_private& RHS) const { + if (isNegative()) + if (RHS.isNegative()) + return (-(*this)).udiv(-RHS); + else + return -((-(*this)).udiv(RHS)); + else if (RHS.isNegative()) + return -(this->udiv((ap_private)(-RHS))); + return this->udiv(RHS); + } + + /// Perform an unsigned remainder operation on this ap_private with RHS being + /// the + /// divisor. Both this and RHS are treated as unsigned quantities for purposes + /// of this operation. Note that this is a true remainder operation and not + /// a modulo operation because the sign follows the sign of the dividend + /// which is *this. + /// @returns a new ap_private value containing the remainder result + /// @brief Unsigned remainder operation. + INLINE ap_private urem(const ap_private& RHS) const { + // Get some facts about the LHS + uint32_t lhsBits = getActiveBits(); + uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); + + // Get some facts about the RHS + uint32_t rhsBits = RHS.getActiveBits(); + uint32_t rhsWords = !rhsBits ? 
0 : (whichWord(rhsBits - 1) + 1); + assert(rhsWords && "Performing remainder operation by zero ???"); + + // Check the degenerate cases + if (lhsWords == 0) { + // 0 % Y ===> 0 + return ap_private(0); + } else if (lhsWords < rhsWords || this->ult(RHS)) { + // X % Y ===> X, iff X < Y + return *this; + } else if (*this == RHS) { + // X % X == 0; + return ap_private(0); + } else if (lhsWords == 1) { + // All high words are zero, just use native remainder + return ap_private(pVal[0] % RHS.get_pVal(0)); + } + + // We have to compute it the hard way. Invoke the Knuth divide algorithm. + ap_private Remainder(0); + ap_private_ops::divide(*this, lhsWords, RHS, rhsWords, (ap_private*)(0), + &Remainder); + return Remainder; + } + + INLINE ap_private urem(uint64_t RHS) const { + // Get some facts about the LHS + uint32_t lhsBits = getActiveBits(); + uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); + // Get some facts about the RHS + uint32_t rhsWords = 1; //! rhsBits ? 0 : (ap_private<_AP_W, + //! _AP_S>::whichWord(rhsBits - 1) + 1); + assert(rhsWords && "Performing remainder operation by zero ???"); + // Check the degenerate cases + if (lhsWords == 0) { + // 0 % Y ===> 0 + return ap_private(0); + } else if (lhsWords < rhsWords || this->ult(RHS)) { + // X % Y ===> X, iff X < Y + return *this; + } else if (*this == RHS) { + // X % X == 0; + return ap_private(0); + } else if (lhsWords == 1) { + // All high words are zero, just use native remainder + return ap_private(pVal[0] % RHS); + } + + // We have to compute it the hard way. Invoke the Knuth divide algorithm. + ap_private Remainder(0); + divide(*this, lhsWords, RHS, (ap_private*)(0), &Remainder); + return Remainder; + } + + /// Signed remainder operation on ap_private. + /// @brief Function for signed remainder operation. 
+ INLINE ap_private srem(const ap_private& RHS) const { + if (isNegative()) { + ap_private lhs = -(*this); + if (RHS.isNegative()) { + ap_private rhs = -RHS; + return -(lhs.urem(rhs)); + } else + return -(lhs.urem(RHS)); + } else if (RHS.isNegative()) { + ap_private rhs = -RHS; + return this->urem(rhs); + } + return this->urem(RHS); + } + + /// Signed remainder operation on ap_private. + /// @brief Function for signed remainder operation. + INLINE ap_private srem(int64_t RHS) const { + if (isNegative()) + if (RHS < 0) + return -((-(*this)).urem(-RHS)); + else + return -((-(*this)).urem(RHS)); + else if (RHS < 0) + return this->urem(-RHS); + return this->urem(RHS); + } + + /// Compares this ap_private with RHS for the validity of the equality + /// relationship. + /// @returns true if *this == Val + /// @brief Equality comparison. + template + INLINE bool eq(const ap_private<_AP_W, _AP_S1>& RHS) const { + return (*this) == RHS; + } + + /// Compares this ap_private with RHS for the validity of the inequality + /// relationship. + /// @returns true if *this != Val + /// @brief Inequality comparison + template + INLINE bool ne(const ap_private<_AP_W, _AP_S1>& RHS) const { + return !((*this) == RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// the validity of the less-than relationship. + /// @returns true if *this < RHS when both are considered unsigned. + /// @brief Unsigned less than comparison + template + INLINE bool ult(const ap_private<_AP_W, _AP_S1>& RHS) const { + // Get active bit length of both operands + uint32_t n1 = getActiveBits(); + uint32_t n2 = RHS.getActiveBits(); + + // If magnitude of LHS is less than RHS, return true. + if (n1 < n2) return true; + + // If magnitude of RHS is greather than LHS, return false. 
+ if (n2 < n1) return false; + + // If they bot fit in a word, just compare the low order word + if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) + return pVal[0] < RHS.get_pVal(0); + + // Otherwise, compare all words + uint32_t topWord = whichWord(AESL_std::max(n1, n2) - 1); + for (int i = topWord; i >= 0; --i) { + if (pVal[i] > RHS.get_pVal(i)) return false; + if (pVal[i] < RHS.get_pVal(i)) return true; + } + return false; + } + + INLINE bool ult(uint64_t RHS) const { + // Get active bit length of both operands + uint32_t n1 = getActiveBits(); + uint32_t n2 = + 64 - ap_private_ops::CountLeadingZeros_64(RHS); // RHS.getActiveBits(); + + // If magnitude of LHS is less than RHS, return true. + if (n1 < n2) return true; + + // If magnitude of RHS is greather than LHS, return false. + if (n2 < n1) return false; + + // If they bot fit in a word, just compare the low order word + if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) + return pVal[0] < RHS; + assert(0); + } + + template + INLINE bool slt(const ap_private<_AP_W, _AP_S1>& RHS) const { + ap_private lhs(*this); + ap_private<_AP_W, _AP_S1> rhs(RHS); + bool lhsNeg = isNegative(); + bool rhsNeg = rhs.isNegative(); + if (lhsNeg) { + // Sign bit is set so perform two's complement to make it positive + lhs.flip(); + lhs++; + } + if (rhsNeg) { + // Sign bit is set so perform two's complement to make it positive + rhs.flip(); + rhs++; + } + + // Now we have unsigned values to compare so do the comparison if necessary + // based on the negativeness of the values. + if (lhsNeg) + if (rhsNeg) + return lhs.ugt(rhs); + else + return true; + else if (rhsNeg) + return false; + else + return lhs.ult(rhs); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// validity of the less-or-equal relationship. + /// @returns true if *this <= RHS when both are considered unsigned. 
+ /// @brief Unsigned less or equal comparison + template + INLINE bool ule(const ap_private<_AP_W, _AP_S1>& RHS) const { + return ult(RHS) || eq(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the less-or-equal relationship. + /// @returns true if *this <= RHS when both are considered signed. + /// @brief Signed less or equal comparison + template + INLINE bool sle(const ap_private<_AP_W, _AP_S1>& RHS) const { + return slt(RHS) || eq(RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// the validity of the greater-than relationship. + /// @returns true if *this > RHS when both are considered unsigned. + /// @brief Unsigned greather than comparison + template + INLINE bool ugt(const ap_private<_AP_W, _AP_S1>& RHS) const { + return !ult(RHS) && !eq(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// the validity of the greater-than relationship. + /// @returns true if *this > RHS when both are considered signed. + /// @brief Signed greather than comparison + template + INLINE bool sgt(const ap_private<_AP_W, _AP_S1>& RHS) const { + return !slt(RHS) && !eq(RHS); + } + + /// Regards both *this and RHS as unsigned quantities and compares them for + /// validity of the greater-or-equal relationship. + /// @returns true if *this >= RHS when both are considered unsigned. + /// @brief Unsigned greater or equal comparison + template + INLINE bool uge(const ap_private<_AP_W, _AP_S>& RHS) const { + return !ult(RHS); + } + + /// Regards both *this and RHS as signed quantities and compares them for + /// validity of the greater-or-equal relationship. + /// @returns true if *this >= RHS when both are considered signed. + /// @brief Signed greather or equal comparison + template + INLINE bool sge(const ap_private<_AP_W, _AP_S1>& RHS) const { + return !slt(RHS); + } + + // Sign extend to a new width. 
+ template + INLINE void cpSext(const ap_private<_AP_W1, _AP_S1>& that) { + assert(_AP_W1 < BitWidth && "Invalid ap_private SignExtend request"); + assert(_AP_W1 <= MAX_INT_BITS && "Too many bits"); + // If the sign bit isn't set, this is the same as zext. + if (!that.isNegative()) { + cpZext(that); + return; + } + + // The sign bit is set. First, get some facts + enum { wordBits = _AP_W1 % APINT_BITS_PER_WORD }; + const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; + // Mask the high order word appropriately + if (_AP_N1 == _AP_N) { + enum { newWordBits = _AP_W % APINT_BITS_PER_WORD }; + // The extension is contained to the wordsBefore-1th word. + static const uint64_t mask = wordBits ? (~0ULL << (wordBits)) : 0ULL; + for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i); + pVal[_AP_N - 1] |= mask; + return; + } + + enum { newWordBits = _AP_W % APINT_BITS_PER_WORD }; + // The extension is contained to the wordsBefore-1th word. + static const uint64_t mask = wordBits ? (~0ULL << (wordBits)) : 0ULL; + int i; + for (i = 0; i < _AP_N1; ++i) pVal[i] = that.get_pVal(i); + pVal[i - 1] |= mask; + for (; i < _AP_N - 1; i++) pVal[i] = ~0ULL; + pVal[i] = ~0ULL; + clearUnusedBits(); + return; + } + + // Zero extend to a new width. 
+ template + INLINE void cpZext(const ap_private<_AP_W1, _AP_S1>& that) { + assert(_AP_W1 < BitWidth && "Invalid ap_private ZeroExtend request"); + assert(_AP_W1 <= MAX_INT_BITS && "Too many bits"); + const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; + int i = 0; + for (; i < _AP_N1; ++i) pVal[i] = that.get_pVal(i); + for (; i < _AP_N; ++i) pVal[i] = 0; + clearUnusedBits(); + } + + template + INLINE void cpZextOrTrunc(const ap_private<_AP_W1, _AP_S1>& that) { + if (BitWidth > _AP_W1) + cpZext(that); + else { + for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i); + clearUnusedBits(); + } + } + + template + INLINE void cpSextOrTrunc(const ap_private<_AP_W1, _AP_S1>& that) { + if (BitWidth > _AP_W1) + cpSext(that); + else { + for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i); + clearUnusedBits(); + } + } + + /// @} + /// @name Value Characterization Functions + /// @{ + + /// @returns the total number of bits. + INLINE uint32_t getBitWidth() const { return BitWidth; } + + /// Here one word's bitwidth equals to that of uint64_t. + /// @returns the number of words to hold the integer value of this ap_private. + /// @brief Get the number of words. + INLINE uint32_t getNumWords() const { + return (BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD; + } + + /// This function returns the number of active bits which is defined as the + /// bit width minus the number of leading zeros. This is used in several + /// computations to see how "wide" the value is. + /// @brief Compute the number of active bits in the value + INLINE uint32_t getActiveBits() const { + uint32_t bits = BitWidth - countLeadingZeros(); + return bits ? bits : 1; + } + + /// This method attempts to return the value of this ap_private as a zero + /// extended + /// uint64_t. The bitwidth must be <= 64 or the value must fit within a + /// uint64_t. Otherwise an assertion will result. 
+ /// @brief Get zero extended value + INLINE uint64_t getZExtValue() const { + assert(getActiveBits() <= 64 && "Too many bits for uint64_t"); + return *pVal; + } + + /// This method attempts to return the value of this ap_private as a sign + /// extended + /// int64_t. The bit width must be <= 64 or the value must fit within an + /// int64_t. Otherwise an assertion will result. + /// @brief Get sign extended value + INLINE int64_t getSExtValue() const { + assert(getActiveBits() <= 64 && "Too many bits for int64_t"); + return int64_t(pVal[0]); + } + + /// This method determines how many bits are required to hold the ap_private + /// equivalent of the string given by \p str of length \p slen. + /// @brief Get bits required for string value. + INLINE static uint32_t getBitsNeeded(const char* str, uint32_t slen, + uint8_t radix) { + assert(str != 0 && "Invalid value string"); + assert(slen > 0 && "Invalid string length"); + + // Each computation below needs to know if its negative + uint32_t isNegative = str[0] == '-'; + if (isNegative) { + slen--; + str++; + } + // For radixes of power-of-two values, the bits required is accurately and + // easily computed + if (radix == 2) return slen + isNegative; + if (radix == 8) return slen * 3 + isNegative; + if (radix == 16) return slen * 4 + isNegative; + + // Otherwise it must be radix == 10, the hard case + assert(radix == 10 && "Invalid radix"); + + // Convert to the actual binary value. + // ap_private<_AP_W, _AP_S> tmp(sufficient, str, slen, radix); + + // Compute how many bits are required. + // return isNegative + tmp.logBase2() + 1; + return isNegative + slen * 4; + } + + /// countLeadingZeros - This function is an ap_private version of the + /// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number + /// of zeros from the most significant bit to the first one bit. + /// @returns BitWidth if the value is zero. 
+ /// @returns the number of zeros from the most significant bit to the first + /// one bits. + INLINE uint32_t countLeadingZeros() const { + enum { + msw_bits = (BitWidth % APINT_BITS_PER_WORD) + ? (BitWidth % APINT_BITS_PER_WORD) + : APINT_BITS_PER_WORD, + excessBits = APINT_BITS_PER_WORD - msw_bits + }; + uint32_t Count = ap_private_ops::CountLeadingZeros_64(pVal[_AP_N - 1]); + if (Count >= excessBits) Count -= excessBits; + if (!pVal[_AP_N - 1]) { + for (int i = _AP_N - 1; i; --i) { + if (!pVal[i - 1]) + Count += APINT_BITS_PER_WORD; + else { + Count += ap_private_ops::CountLeadingZeros_64(pVal[i - 1]); + break; + } + } + } + return Count; + } + + /// countLeadingOnes - This function counts the number of contiguous 1 bits + /// in the high order bits. The count stops when the first 0 bit is reached. + /// @returns 0 if the high order bit is not set + /// @returns the number of 1 bits from the most significant to the least + /// @brief Count the number of leading one bits. + INLINE uint32_t countLeadingOnes() const { + if (isSingleWord()) + return countLeadingOnes_64(get_VAL(), APINT_BITS_PER_WORD - BitWidth); + + uint32_t highWordBits = BitWidth % APINT_BITS_PER_WORD; + uint32_t shift = + (highWordBits == 0 ? 0 : APINT_BITS_PER_WORD - highWordBits); + int i = _AP_N - 1; + uint32_t Count = countLeadingOnes_64(get_pVal(i), shift); + if (Count == highWordBits) { + for (i--; i >= 0; --i) { + if (get_pVal(i) == ~0ULL) + Count += APINT_BITS_PER_WORD; + else { + Count += countLeadingOnes_64(get_pVal(i), 0); + break; + } + } + } + return Count; + } + + /// countTrailingZeros - This function is an ap_private version of the + /// countTrailingZoers_{32,64} functions in MathExtras.h. It counts + /// the number of zeros from the least significant bit to the first set bit. + /// @returns BitWidth if the value is zero. + /// @returns the number of zeros from the least significant bit to the first + /// one bit. + /// @brief Count the number of trailing zero bits. 
+ INLINE uint32_t countTrailingZeros() const { + uint32_t Count = 0; + uint32_t i = 0; + for (; i < _AP_N && get_pVal(i) == 0; ++i) Count += APINT_BITS_PER_WORD; + if (i < _AP_N) Count += ap_private_ops::CountTrailingZeros_64(get_pVal(i)); + return AESL_std::min(Count, BitWidth); + } + /// countPopulation - This function is an ap_private version of the + /// countPopulation_{32,64} functions in MathExtras.h. It counts the number + /// of 1 bits in the ap_private value. + /// @returns 0 if the value is zero. + /// @returns the number of set bits. + /// @brief Count the number of bits set. + INLINE uint32_t countPopulation() const { + uint32_t Count = 0; + for (int i = 0; i < _AP_N - 1; ++i) + Count += ap_private_ops::CountPopulation_64(pVal[i]); + Count += ap_private_ops::CountPopulation_64(pVal[_AP_N - 1] & mask); + return Count; + } + + /// @} + /// @name Conversion Functions + /// @ + + /// This is used internally to convert an ap_private to a string. + /// @brief Converts an ap_private to a std::string + INLINE std::string toString(uint8_t radix, bool wantSigned) const; + + /// Considers the ap_private to be unsigned and converts it into a string in + /// the + /// radix given. The radix can be 2, 8, 10 or 16. + /// @returns a character interpretation of the ap_private + /// @brief Convert unsigned ap_private to string representation. + INLINE std::string toStringUnsigned(uint8_t radix = 10) const { + return toString(radix, false); + } + + /// Considers the ap_private to be unsigned and converts it into a string in + /// the + /// radix given. The radix can be 2, 8, 10 or 16. + /// @returns a character interpretation of the ap_private + /// @brief Convert unsigned ap_private to string representation. + INLINE std::string toStringSigned(uint8_t radix = 10) const { + return toString(radix, true); + } + + /// @brief Converts this ap_private to a double value. 
+ INLINE double roundToDouble(bool isSigned) const { + // Handle the simple case where the value is contained in one uint64_t. + if (isSingleWord() || getActiveBits() <= APINT_BITS_PER_WORD) { + uint64_t val = pVal[0]; + if (isSigned) { + int64_t sext = ((int64_t(val)) << (64 - BitWidth)) >> (64 - BitWidth); + return double(sext); + } else + return double(val); + } + + // Determine if the value is negative. + bool isNeg = isSigned ? (*this)[BitWidth - 1] : false; + + // Construct the absolute value if we're negative. + ap_private<_AP_W, _AP_S> Tmp(isNeg ? -(*this) : (*this)); + + // Figure out how many bits we're using. + uint32_t n = Tmp.getActiveBits(); + + // The exponent (without bias normalization) is just the number of bits + // we are using. Note that the sign bit is gone since we constructed the + // absolute value. + uint64_t exp = n; + + // Return infinity for exponent overflow + if (exp > 1023) { + if (!isSigned || !isNeg) + return std::numeric_limits::infinity(); + else + return -std::numeric_limits::infinity(); + } + exp += 1023; // Increment for 1023 bias + + // Number of bits in mantissa is 52. To obtain the mantissa value, we must + // extract the high 52 bits from the correct words in pVal. + uint64_t mantissa; + unsigned hiWord = whichWord(n - 1); + if (hiWord == 0) { + mantissa = Tmp.get_pVal(0); + if (n > 52) + (mantissa) >>= (n - 52); // shift down, we want the top 52 bits. + } else { + assert(hiWord > 0 && "High word is negative?"); + uint64_t hibits = (Tmp.get_pVal(hiWord)) + << (52 - n % APINT_BITS_PER_WORD); + uint64_t lobits = + (Tmp.get_pVal(hiWord - 1)) >> (11 + n % APINT_BITS_PER_WORD); + mantissa = hibits | lobits; + } + + // The leading bit of mantissa is implicit, so get rid of it. + uint64_t sign = isNeg ? (1ULL << (APINT_BITS_PER_WORD - 1)) : 0; + union { + double __D; + uint64_t __I; + } __T; + __T.__I = sign | ((exp) << 52) | mantissa; + return __T.__D; + } + + /// @brief Converts this unsigned ap_private to a double value. 
+ INLINE double roundToDouble() const { return roundToDouble(false); } + + /// @brief Converts this signed ap_private to a double value. + INLINE double signedRoundToDouble() const { return roundToDouble(true); } + + /// The conversion does not do a translation from integer to double, it just + /// re-interprets the bits as a double. Note that it is valid to do this on + /// any bit width. Exactly 64 bits will be translated. + /// @brief Converts ap_private bits to a double + INLINE double bitsToDouble() const { + union { + uint64_t __I; + double __D; + } __T; + __T.__I = pVal[0]; + return __T.__D; + } + + /// The conversion does not do a translation from integer to float, it just + /// re-interprets the bits as a float. Note that it is valid to do this on + /// any bit width. Exactly 32 bits will be translated. + /// @brief Converts ap_private bits to a double + INLINE float bitsToFloat() const { + union { + uint32_t __I; + float __F; + } __T; + __T.__I = uint32_t(pVal[0]); + return __T.__F; + } + + /// The conversion does not do a translation from double to integer, it just + /// re-interprets the bits of the double. Note that it is valid to do this on + /// any bit width but bits from V may get truncated. + /// @brief Converts a double to ap_private bits. + INLINE ap_private& doubleToBits(double __V) { + union { + uint64_t __I; + double __D; + } __T; + __T.__D = __V; + pVal[0] = __T.__I; + return *this; + } + + /// The conversion does not do a translation from float to integer, it just + /// re-interprets the bits of the float. Note that it is valid to do this on + /// any bit width but bits from V may get truncated. + /// @brief Converts a float to ap_private bits. 
+ INLINE ap_private& floatToBits(float __V) { + union { + uint32_t __I; + float __F; + } __T; + __T.__F = __V; + pVal[0] = __T.__I; + } + + // Reduce operation + //----------------------------------------------------------- + INLINE bool and_reduce() const { return isMaxValue(); } + + INLINE bool nand_reduce() const { return isMinValue(); } + + INLINE bool or_reduce() const { return (bool)countPopulation(); } + + INLINE bool nor_reduce() const { return countPopulation() == 0; } + + INLINE bool xor_reduce() const { + unsigned int i = countPopulation(); + return (i % 2) ? true : false; + } + + INLINE bool xnor_reduce() const { + unsigned int i = countPopulation(); + return (i % 2) ? false : true; + } + INLINE std::string to_string(uint8_t radix = 16, bool sign = false) const { + return toString(radix, radix == 10 ? _AP_S : sign); + } +}; // End of class ap_private <_AP_W, _AP_S, false> + +namespace ap_private_ops { + +enum { APINT_BITS_PER_WORD = 64 }; +template +INLINE bool operator==(uint64_t V1, const ap_private<_AP_W, _AP_S>& V2) { + return V2 == V1; +} + +template +INLINE bool operator!=(uint64_t V1, const ap_private<_AP_W, _AP_S>& V2) { + return V2 != V1; +} + +template +INLINE bool get(const ap_private<_AP_W, _AP_S>& a) { + static const uint64_t mask = 1ULL << (index & 0x3f); + return ((mask & a.get_pVal((index) >> 6)) != 0); +} + +template +INLINE void set(ap_private<_AP_W, _AP_S>& a, + const ap_private& mark1 = 0, + const ap_private& mark2 = 0) { + enum { + APINT_BITS_PER_WORD = 64, + lsb_word = lsb_index / APINT_BITS_PER_WORD, + msb_word = msb_index / APINT_BITS_PER_WORD, + msb = msb_index % APINT_BITS_PER_WORD, + lsb = lsb_index % APINT_BITS_PER_WORD + }; + if (msb_word == lsb_word) { + const uint64_t mask = ~0ULL >> + (lsb) << (APINT_BITS_PER_WORD - msb + lsb - 1) >> + (APINT_BITS_PER_WORD - msb - 1); + // a.set_pVal(msb_word, a.get_pVal(msb_word) | mask); + a.get_pVal(msb_word) |= mask; + } else { + const uint64_t lsb_mask = ~0ULL >> (lsb) << (lsb); + 
const uint64_t msb_mask = ~0ULL << (APINT_BITS_PER_WORD - msb - 1) >> + (APINT_BITS_PER_WORD - msb - 1); + // a.set_pVal(lsb_word, a.get_pVal(lsb_word) | lsb_mask); + a.get_pVal(lsb_word) |= lsb_mask; + for (int i = lsb_word + 1; i < msb_word; i++) { + a.set_pVal(i, ~0ULL); + // a.get_pVal(i)=0; + } + // a.set_pVal(msb_word, a.get_pVal(msb_word) | msb_mask); + + a.get_pVal(msb_word) |= msb_mask; + } + a.clearUnusedBits(); +} + +template +INLINE void clear(ap_private<_AP_W, _AP_S>& a, + const ap_private& mark1 = 0, + const ap_private& mark2 = 0) { + enum { + APINT_BITS_PER_WORD = 64, + lsb_word = lsb_index / APINT_BITS_PER_WORD, + msb_word = msb_index / APINT_BITS_PER_WORD, + msb = msb_index % APINT_BITS_PER_WORD, + lsb = lsb_index % APINT_BITS_PER_WORD + }; + if (msb_word == lsb_word) { + const uint64_t mask = + ~(~0ULL >> (lsb) << (APINT_BITS_PER_WORD - msb + lsb - 1) >> + (APINT_BITS_PER_WORD - msb - 1)); + // a.set_pVal(msb_word, a.get_pVal(msb_word) & mask); + a.get_pVal(msb_word) &= mask; + } else { + const uint64_t lsb_mask = ~(~0ULL >> (lsb) << (lsb)); + const uint64_t msb_mask = ~(~0ULL << (APINT_BITS_PER_WORD - msb - 1) >> + (APINT_BITS_PER_WORD - msb - 1)); + // a.set_pVal(lsb_word, a.get_pVal(lsb_word) & lsb_mask); + a.get_pVal(lsb_word) &= lsb_mask; + for (int i = lsb_word + 1; i < msb_word; i++) { + // a.set_pVal(i, 0); + a.get_pVal(i) = 0; + } + // a.set_pVal(msb_word, a.get_pVal(msb_word) & msb_mask); + a.get_pVal(msb_word) &= msb_mask; + } + a.clearUnusedBits(); +} + +template +INLINE void set(ap_private<_AP_W, _AP_S>& a, + const ap_private& mark = 0) { + enum { APINT_BITS_PER_WORD = 64, word = index / APINT_BITS_PER_WORD }; + static const uint64_t mask = 1ULL << (index % APINT_BITS_PER_WORD); + // a.set_pVal(word, a.get_pVal(word) | mask); + a.get_pVal(word) |= mask; + a.clearUnusedBits(); +} + +template +INLINE void clear(ap_private<_AP_W, _AP_S>& a, + const ap_private& mark = 0) { + enum { APINT_BITS_PER_WORD = 64, word = index / 
APINT_BITS_PER_WORD }; + static const uint64_t mask = ~(1ULL << (index % APINT_BITS_PER_WORD)); + // a.set_pVal(word, a.get_pVal(word) & mask); + a.get_pVal(word) &= mask; + a.clearUnusedBits(); +} + +} // End of ap_private_ops namespace + +template +INLINE std::string ap_private<_AP_W, _AP_S, false>::toString( + uint8_t radix, bool wantSigned) const { + assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && + "Radix should be 2, 8, 10, or 16!"); + static const char* digits[] = {"0", "1", "2", "3", "4", "5", "6", "7", + "8", "9", "A", "B", "C", "D", "E", "F"}; + std::string result; + + if (radix != 10) { + // For the 2, 8 and 16 bit cases, we can just shift instead of divide + // because the number of bits per digit (1,3 and 4 respectively) divides + // equaly. We just shift until there value is zero. + + // First, check for a zero value and just short circuit the logic below. + if (*this == (uint64_t)(0)) + result = "0"; + else { + ap_private<_AP_W, false> tmp(*this); + size_t insert_at = 0; + bool leading_zero = true; + if (wantSigned && isNegative()) { + // They want to print the signed version and it is a negative value + // Flip the bits and add one to turn it into the equivalent positive + // value and put a '-' in the result. + tmp.flip(); + tmp++; + tmp.clearUnusedBitsToZero(); + result = "-"; + insert_at = 1; + leading_zero = false; + } + switch (radix) { + case 2: + result += "0b"; + break; + case 8: + result += "0o"; + break; + case 16: + result += "0x"; + break; + default: + assert("invalid radix" && 0); + } + insert_at += 2; + // Just shift tmp right for each digit width until it becomes zero + uint32_t shift = (radix == 16 ? 4 : (radix == 8 ? 
3 : 1)); + uint64_t mask = radix - 1; + ap_private<_AP_W, false> zero(0); + unsigned bits = 0; + while (tmp.ne(zero)) { + uint64_t digit = tmp.get_VAL() & mask; + result.insert(insert_at, digits[digit]); + tmp = tmp.lshr(shift); + ++bits; + } + bits *= shift; + if (bits < _AP_W && leading_zero) result.insert(insert_at, digits[0]); + } + return result; + } + + ap_private<_AP_W, false> tmp(*this); + ap_private<_AP_W, false> divisor(radix); + ap_private<_AP_W, false> zero(0); + size_t insert_at = 0; + if (wantSigned && isNegative()) { + // They want to print the signed version and it is a negative value + // Flip the bits and add one to turn it into the equivalent positive + // value and put a '-' in the result. + tmp.flip(); + tmp++; + tmp.clearUnusedBitsToZero(); + result = "-"; + insert_at = 1; + } + if (tmp == ap_private<_AP_W, false>(0)) + result = "0"; + else + while (tmp.ne(zero)) { + ap_private<_AP_W, false> APdigit(0); + ap_private<_AP_W, false> tmp2(0); + ap_private_ops::divide(tmp, tmp.getNumWords(), divisor, + divisor.getNumWords(), &tmp2, &APdigit); + uint64_t digit = APdigit.getZExtValue(); + assert(digit < radix && "divide failed"); + result.insert(insert_at, digits[digit]); + tmp = tmp2; + } + + return result; +} // End of ap_private<_AP_W, _AP_S, false>::toString() + +template +std::ostream &operator<<(std::ostream &os, const ap_private<_AP_W, _AP_S> &x) { + std::ios_base::fmtflags ff = std::cout.flags(); + if (ff & std::cout.hex) { + os << x.toString(16, false); // don't print sign + } else if (ff & std::cout.oct) { + os << x.toString(8, false); // don't print sign + } else { + os << x.toString(10, _AP_S); + } + return os; +} + +// ------------------------------------------------------------ // +// XXX moved here from ap_int_sim.h XXX // +// ------------------------------------------------------------ // + +/// Concatination reference. 
+/// Proxy class which allows concatination to be used as rvalue(for reading) and +/// lvalue(for writing) +// ---------------------------------------------------------------- +// template +// struct ap_concat_ref { +//#ifdef _MSC_VER +//#pragma warning(disable : 4521 4522) +//#endif +// enum { +// _AP_WR = _AP_W1 + _AP_W2, +// }; +// _AP_T1& mbv1; +// _AP_T2& mbv2; +// +// INLINE ap_concat_ref(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& +// ref) +// : mbv1(ref.mbv1), mbv2(ref.mbv2) {} +// +// INLINE ap_concat_ref(_AP_T1& bv1, _AP_T2& bv2) : mbv1(bv1), mbv2(bv2) {} +// +// template +// INLINE ap_concat_ref& operator=(const ap_private<_AP_W3, _AP_S3>& val) { +// ap_private<_AP_W1 + _AP_W2, false> vval(val); +// int W_ref1 = mbv1.length(); +// int W_ref2 = mbv2.length(); +// ap_private<_AP_W1, false> mask1(-1); +// mask1 >>= _AP_W1 - W_ref1; +// ap_private<_AP_W2, false> mask2(-1); +// mask2 >>= _AP_W2 - W_ref2; +// mbv1.set(ap_private<_AP_W1, false>((vval >> W_ref2) & mask1)); +// mbv2.set(ap_private<_AP_W2, false>(vval & mask2)); +// return *this; +// } +// +// INLINE ap_concat_ref& operator=(unsigned long long val) { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); +// return operator=(tmpVal); +// } +// +// template +// INLINE ap_concat_ref& operator=( +// const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); +// return operator=(tmpVal); +// } +// +// INLINE ap_concat_ref& operator=( +// const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& val) { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); +// return operator=(tmpVal); +// } +// +// template +// INLINE ap_concat_ref& operator=(const _private_bit_ref<_AP_W3, _AP_S3>& +// val) { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); +// return operator=(tmpVal); +// } +// +// template +// INLINE ap_concat_ref& operator=(const _private_range_ref<_AP_W3, _AP_S3>& +// val) { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); +// return 
operator=(tmpVal); +// } +// +// template +// INLINE ap_concat_ref& operator=( +// const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) +// { +// return operator=((const ap_private<_AP_W3, false>)(val)); +// } +// +// template +// INLINE ap_concat_ref& operator=( +// const ap_fixed_base<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& +// val) { +// return operator=(val.to_ap_private()); +// } +// +// template +// INLINE ap_concat_ref& operator=( +// const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { +// return operator=((unsigned long long)(bool)(val)); +// } +// +// INLINE operator ap_private<_AP_WR, false>() const { return get(); } +// +// INLINE operator unsigned long long() const { return get().to_uint64(); } +// +// template +// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, +// _private_range_ref<_AP_W3, _AP_S3> > +// operator,(const _private_range_ref<_AP_W3, _AP_S3> &a2) { +// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, +// _private_range_ref<_AP_W3, _AP_S3> >( +// *this, const_cast<_private_range_ref<_AP_W3, _AP_S3>&>(a2)); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_private<_AP_W3, _AP_S3> +// > +// operator,(ap_private<_AP_W3, _AP_S3> &a2) { +// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, +// ap_private<_AP_W3, _AP_S3> >(*this, a2); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_private<_AP_W3, _AP_S3> +// > +// operator,(const ap_private<_AP_W3, _AP_S3> &a2) { +// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, +// ap_private<_AP_W3, _AP_S3> >( +// *this, const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, 1, _private_bit_ref<_AP_W3, +// _AP_S3> > +// operator,(const _private_bit_ref<_AP_W3, _AP_S3> &a2) { +// return ap_concat_ref<_AP_WR, ap_concat_ref, 1, _private_bit_ref<_AP_W3, +// _AP_S3> >( +// *this, const_cast<_private_bit_ref<_AP_W3, _AP_S3>&>(a2)); +// } +// 
+// template +// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, +// ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> > +// operator,(const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> &a2) { +// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, +// ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> >( +// *this, const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref< +// _AP_WR, ap_concat_ref, _AP_W3, +// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > +// operator,( +// const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> &a2) +// { +// return ap_concat_ref< +// _AP_WR, ap_concat_ref, _AP_W3, +// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( +// *this, +// const_cast< +// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, +// _AP_N3>&>(a2)); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_WR, ap_concat_ref, 1, +// af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> +// > +// operator,(const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, +// _AP_N3> +// &a2) { +// return ap_concat_ref< +// _AP_WR, ap_concat_ref, 1, +// af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( +// *this, +// const_cast&>( +// a2)); +// } +// +// template +// INLINE ap_private operator&( +// const ap_private<_AP_W3, _AP_S3>& a2) { +// return get() & a2; +// } +// +// template +// INLINE ap_private operator|( +// const ap_private<_AP_W3, _AP_S3>& a2) { +// return get() | a2; +// } +// +// template +// INLINE ap_private operator^( +// const ap_private<_AP_W3, _AP_S3>& a2) { +// return ap_private(get() ^ a2); +// } +// +// INLINE const ap_private<_AP_WR, false> get() const { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal = +// ap_private<_AP_W1 + _AP_W2, false>(mbv1.get()); +// ap_private<_AP_W1 + _AP_W2, false> tmpVal2 = +// ap_private<_AP_W1 + _AP_W2, false>(mbv2.get()); +// int W_ref2 = mbv2.length(); +// tmpVal <<= W_ref2; +// tmpVal |= tmpVal2; +// return tmpVal; +// } +// +// 
INLINE const ap_private<_AP_WR, false> get() { +// ap_private<_AP_W1 + _AP_W2, false> tmpVal = +// ap_private<_AP_W1 + _AP_W2, false>(mbv1.get()); +// ap_private<_AP_W1 + _AP_W2, false> tmpVal2 = +// ap_private<_AP_W1 + _AP_W2, false>(mbv2.get()); +// int W_ref2 = mbv2.length(); +// tmpVal <<= W_ref2; +// tmpVal |= tmpVal2; +// return tmpVal; +// } +// +// template +// INLINE void set(const ap_private<_AP_W3, false>& val) { +// ap_private<_AP_W1 + _AP_W2, false> vval(val); +// int W_ref1 = mbv1.length(); +// int W_ref2 = mbv2.length(); +// ap_private<_AP_W1, false> mask1(-1); +// mask1 >>= _AP_W1 - W_ref1; +// ap_private<_AP_W2, false> mask2(-1); +// mask2 >>= _AP_W2 - W_ref2; +// mbv1.set(ap_private<_AP_W1, false>((vval >> W_ref2) & mask1)); +// mbv2.set(ap_private<_AP_W2, false>(vval & mask2)); +// } +// +// INLINE int length() const { return mbv1.length() + mbv2.length(); } +// +// INLINE std::string to_string(uint8_t radix = 2) const { +// return get().to_string(radix); +// } +//}; // struct ap_concat_ref. + +/// Range(slice) reference +/// Proxy class, which allows part selection to be used as rvalue(for reading) +/// and lvalue(for writing) +//------------------------------------------------------------ +template +struct _private_range_ref { +#ifdef _MSC_VER +#pragma warning(disable : 4521 4522) +#endif + ap_private<_AP_W, _AP_S>& d_bv; + int l_index; + int h_index; + + public: + /// copy ctor. + INLINE _private_range_ref(const _private_range_ref<_AP_W, _AP_S>& ref) + : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} + + /// direct ctor. + INLINE _private_range_ref(ap_private<_AP_W, _AP_S>* bv, int h, int l) + : d_bv(*bv), l_index(l), h_index(h) { + _AP_WARNING(h < 0 || l < 0, + "Higher bound (%d) and lower bound (%d) cannot be " + "negative.", + h, l); + _AP_WARNING(h >= _AP_W || l >= _AP_W, + "Higher bound (%d) or lower bound (%d) out of range (%d).", h, l, + _AP_W); + } + + /// compound or assignment. 
+ template + INLINE _private_range_ref<_AP_W, _AP_S>& operator|=( + const _private_range_ref<_AP_W2, _AP_S2>& ref) { + _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), + "Bitsize mismach for ap_private<>.range() &= " + "ap_private<>.range()."); + this->d_bv |= ref.d_bv; + return *this; + } + + /// compound or assignment with root type. + template + INLINE _private_range_ref<_AP_W, _AP_S>& operator|=( + const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { + _AP_WARNING((h_index - l_index + 1) != _AP_W2, + "Bitsize mismach for ap_private<>.range() |= _AP_ROOT_TYPE<>."); + this->d_bv |= ref.V; + return *this; + } + + /// compound and assignment. + template + INLINE _private_range_ref<_AP_W, _AP_S>& operator&=( + const _private_range_ref<_AP_W2, _AP_S2>& ref) { + _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), + "Bitsize mismach for ap_private<>.range() &= " + "ap_private<>.range()."); + this->d_bv &= ref.d_bv; + return *this; + }; + + /// compound and assignment with root type. + template + INLINE _private_range_ref<_AP_W, _AP_S>& operator&=( + const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { + _AP_WARNING((h_index - l_index + 1) != _AP_W2, + "Bitsize mismach for ap_private<>.range() &= _AP_ROOT_TYPE<>."); + this->d_bv &= ref.V; + return *this; + } + + /// compound xor assignment. + template + INLINE _private_range_ref<_AP_W, _AP_S>& operator^=( + const _private_range_ref<_AP_W2, _AP_S2>& ref) { + _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), + "Bitsize mismach for ap_private<>.range() ^= " + "ap_private<>.range()."); + this->d_bv ^= ref.d_bv; + return *this; + }; + + /// compound xor assignment with root type. + template + INLINE _private_range_ref<_AP_W, _AP_S>& operator^=( + const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { + _AP_WARNING((h_index - l_index + 1) != _AP_W2, + "Bitsize mismach for ap_private<>.range() ^= _AP_ROOT_TYPE<>."); + this->d_bv ^= ref.V; + return *this; + } + + /// @name convertors. 
+ // @{ + INLINE operator ap_private<_AP_W, false>() const { + ap_private<_AP_W, false> val(0); + if (h_index >= l_index) { + if (_AP_W > 64) { + val = d_bv; + ap_private<_AP_W, false> mask(-1); + mask >>= _AP_W - (h_index - l_index + 1); + val >>= l_index; + val &= mask; + } else { + const static uint64_t mask = (~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0)); + val = (d_bv >> l_index) & (mask >> (_AP_W - (h_index - l_index + 1))); + } + } else { + for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) + if ((d_bv)[j]) val.set(i); + } + return val; + } + + INLINE operator unsigned long long() const { return to_uint64(); } + // @} + + template + INLINE _private_range_ref& operator=(const ap_private<_AP_W2, _AP_S2>& val) { + ap_private<_AP_W, false> vval = ap_private<_AP_W, false>(val); + if (l_index > h_index) { + for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) + (vval)[i] ? d_bv.set(j) : d_bv.clear(j); + } else { + if (_AP_W > 64) { + ap_private<_AP_W, false> mask(-1); + if (l_index > 0) { + mask <<= l_index; + vval <<= l_index; + } + if (h_index < _AP_W - 1) { + ap_private<_AP_W, false> mask2(-1); + mask2 >>= _AP_W - h_index - 1; + mask &= mask2; + vval &= mask2; + } + mask.flip(); + d_bv &= mask; + d_bv |= vval; + } else { + unsigned shift = 64 - _AP_W; + uint64_t mask = ~0ULL >> (shift); + if (l_index > 0) { + vval = mask & vval << l_index; + mask = mask & mask << l_index; + } + if (h_index < _AP_W - 1) { + uint64_t mask2 = mask; + mask2 >>= (_AP_W - h_index - 1); + mask &= mask2; + vval &= mask2; + } + mask = ~mask; + d_bv &= mask; + d_bv |= vval; + } + } + return *this; + } // operator=(const ap_private<>&) + + INLINE _private_range_ref& operator=(unsigned long long val) { + const ap_private<_AP_W, _AP_S> vval = val; + return operator=(vval); + } + + template + INLINE _private_range_ref& operator=( + const _private_bit_ref<_AP_W2, _AP_S2>& val) { + return operator=((unsigned long long)(bool)val); + } + + template + INLINE _private_range_ref& 
operator=( + const _private_range_ref<_AP_W2, _AP_S2>& val) { + const ap_private<_AP_W, false> tmpVal(val); + return operator=(tmpVal); + } + +// template +// INLINE _private_range_ref& operator=( +// const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { +// const ap_private<_AP_W, false> tmpVal(val); +// return operator=(tmpVal); +// } + + // TODO from ap_int_base, ap_bit_ref and ap_range_ref. + + template + INLINE _private_range_ref& operator=( + const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=(val.to_ap_int_base().V); + } + + template + INLINE _private_range_ref& operator=( + const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=(val.operator ap_int_base<_AP_W2, false>().V); + } + + template + INLINE _private_range_ref& operator=( + const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { + return operator=((unsigned long long)(bool)val); + } + +// template +// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> > +// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, +// _private_range_ref<_AP_W2, _AP_S2> >( +// *this, const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, +// ap_private<_AP_W2, _AP_S2> > +// operator,(ap_private<_AP_W2, _AP_S2> &a2) { +// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, +// ap_private<_AP_W2, _AP_S2> >(*this, a2); +// } +// +// INLINE +// ap_concat_ref<_AP_W, _private_range_ref, _AP_W, ap_private<_AP_W, _AP_S> > +// operator,(ap_private<_AP_W, _AP_S>& a2) { +// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W, +// ap_private<_AP_W, _AP_S> >(*this, a2); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, _private_range_ref, 1, +// _private_bit_ref<_AP_W2, _AP_S2> > +// operator,(const _private_bit_ref<_AP_W2, _AP_S2> 
&a2) { +// return ap_concat_ref<_AP_W, _private_range_ref, 1, +// _private_bit_ref<_AP_W2, _AP_S2> >( +// *this, const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > +// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { +// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( +// *this, const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref< +// _AP_W, _private_range_ref, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,( +// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { +// return ap_concat_ref< +// _AP_W, _private_range_ref, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// *this, +// const_cast< +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); +// } +// +// template +// INLINE +// ap_concat_ref<_AP_W, _private_range_ref, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> +// &a2) { +// return ap_concat_ref< +// _AP_W, _private_range_ref, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// *this, +// const_cast&>( +// a2)); +// } + + template + INLINE bool operator==(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + ap_private<_AP_W, false> lhs = get(); + ap_private<_AP_W2, false> rhs = op2.get(); + return lhs == rhs; + } + + template + INLINE bool operator!=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + ap_private<_AP_W, false> lhs = get(); + ap_private<_AP_W2, false> rhs = op2.get(); + return lhs != rhs; + } + + template + INLINE bool operator>(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + ap_private<_AP_W, false> lhs = get(); + ap_private<_AP_W2, false> rhs = op2.get(); + return 
lhs > rhs; + } + + template + INLINE bool operator>=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + ap_private<_AP_W, false> lhs = get(); + ap_private<_AP_W2, false> rhs = op2.get(); + return lhs >= rhs; + } + + template + INLINE bool operator<(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + ap_private<_AP_W, false> lhs = get(); + ap_private<_AP_W2, false> rhs = op2.get(); + return lhs < rhs; + } + + template + INLINE bool operator<=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { + ap_private<_AP_W, false> lhs = get(); + ap_private<_AP_W2, false> rhs = op2.get(); + return lhs <= rhs; + } + + template + INLINE void set(const ap_private<_AP_W2, false>& val) { + ap_private<_AP_W, _AP_S> vval = val; + if (l_index > h_index) { + for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) + (vval)[i] ? d_bv.set(j) : d_bv.clear(j); + } else { + if (_AP_W > 64) { + ap_private<_AP_W, _AP_S> mask(-1); + if (l_index > 0) { + ap_private<_AP_W, false> mask1(-1); + mask1 >>= _AP_W - l_index; + mask1.flip(); + mask = mask1; + // vval&=mask1; + vval <<= l_index; + } + if (h_index < _AP_W - 1) { + ap_private<_AP_W, false> mask2(-1); + mask2 <<= h_index + 1; + mask2.flip(); + mask &= mask2; + vval &= mask2; + } + mask.flip(); + d_bv &= mask; + d_bv |= vval; + } else { + uint64_t mask = ~0ULL >> (64 - _AP_W); + if (l_index > 0) { + uint64_t mask1 = mask; + mask1 = mask & (mask1 >> (_AP_W - l_index)); + vval = mask & (vval << l_index); + mask = ~mask1 & mask; + // vval&=mask1; + } + if (h_index < _AP_W - 1) { + uint64_t mask2 = ~0ULL >> (64 - _AP_W); + mask2 = mask & (mask2 << (h_index + 1)); + mask &= ~mask2; + vval &= ~mask2; + } + d_bv &= (~mask & (~0ULL >> (64 - _AP_W))); + d_bv |= vval; + } + } + } + + INLINE ap_private<_AP_W, false> get() const { + ap_private<_AP_W, false> val(0); + if (h_index < l_index) { + for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) + if ((d_bv)[j]) val.set(i); + } else { + val = d_bv; + val >>= l_index; + if (h_index < _AP_W - 
1) { + if (_AP_W <= 64) { + const static uint64_t mask = + (~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0)); + val &= (mask >> (_AP_W - (h_index - l_index + 1))); + } else { + ap_private<_AP_W, false> mask(-1); + mask >>= _AP_W - (h_index - l_index + 1); + val &= mask; + } + } + } + return val; + } + + INLINE ap_private<_AP_W, false> get() { + ap_private<_AP_W, false> val(0); + if (h_index < l_index) { + for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) + if ((d_bv)[j]) val.set(i); + } else { + val = d_bv; + val >>= l_index; + if (h_index < _AP_W - 1) { + if (_AP_W <= 64) { + static const uint64_t mask = ~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0); + return val &= ((mask) >> (_AP_W - (h_index - l_index + 1))); + } else { + ap_private<_AP_W, false> mask(-1); + mask >>= _AP_W - (h_index - l_index + 1); + val &= mask; + } + } + } + return val; + } + + INLINE int length() const { + return h_index >= l_index ? h_index - l_index + 1 : l_index - h_index + 1; + } + + INLINE int to_int() const { + ap_private<_AP_W, false> val = get(); + return val.to_int(); + } + + INLINE unsigned int to_uint() const { + ap_private<_AP_W, false> val = get(); + return val.to_uint(); + } + + INLINE long to_long() const { + ap_private<_AP_W, false> val = get(); + return val.to_long(); + } + + INLINE unsigned long to_ulong() const { + ap_private<_AP_W, false> val = get(); + return val.to_ulong(); + } + + INLINE ap_slong to_int64() const { + ap_private<_AP_W, false> val = get(); + return val.to_int64(); + } + + INLINE ap_ulong to_uint64() const { + ap_private<_AP_W, false> val = get(); + return val.to_uint64(); + } + + INLINE std::string to_string(uint8_t radix = 2) const { + return get().to_string(radix); + } + + INLINE bool and_reduce() { + bool ret = true; + bool reverse = l_index > h_index; + unsigned low = reverse ? h_index : l_index; + unsigned high = reverse ? 
l_index : h_index; + for (unsigned i = low; i != high; ++i) ret &= d_bv[i]; + return ret; + } + + INLINE bool or_reduce() { + bool ret = false; + bool reverse = l_index > h_index; + unsigned low = reverse ? h_index : l_index; + unsigned high = reverse ? l_index : h_index; + for (unsigned i = low; i != high; ++i) ret |= d_bv[i]; + return ret; + } + + INLINE bool xor_reduce() { + bool ret = false; + bool reverse = l_index > h_index; + unsigned low = reverse ? h_index : l_index; + unsigned high = reverse ? l_index : h_index; + for (unsigned i = low; i != high; ++i) ret ^= d_bv[i]; + return ret; + } +}; // struct _private_range_ref. + +/// Bit reference +/// Proxy class, which allows bit selection to be used as rvalue(for reading) +/// and lvalue(for writing) +//-------------------------------------------------------------- +template +struct _private_bit_ref { +#ifdef _MSC_VER +#pragma warning(disable : 4521 4522) +#endif + ap_private<_AP_W, _AP_S>& d_bv; + int d_index; + + public: + // copy ctor. + INLINE _private_bit_ref(const _private_bit_ref<_AP_W, _AP_S>& ref) + : d_bv(ref.d_bv), d_index(ref.d_index) {} + + // director ctor. 
+ INLINE _private_bit_ref(ap_private<_AP_W, _AP_S>& bv, int index = 0) + : d_bv(bv), d_index(index) { + _AP_WARNING(d_index < 0, "Index of bit vector (%d) cannot be negative.\n", + d_index); + _AP_WARNING(d_index >= _AP_W, + "Index of bit vector (%d) out of range (%d).\n", d_index, _AP_W); + } + + INLINE operator bool() const { return d_bv.get_bit(d_index); } + + INLINE bool to_bool() const { return operator bool(); } + + template + INLINE _private_bit_ref& operator=(const T& val) { + if (!!val) + d_bv.set(d_index); + else + d_bv.clear(d_index); + return *this; + } + +// template +// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2, ap_private<_AP_W2, +// _AP_S2> > +// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<1, _private_bit_ref, _AP_W2, ap_private<_AP_W2, +// _AP_S2> >( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), a2); +// } +// +// template +// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2, +// _private_range_ref<_AP_W2, +// _AP_S2> > +// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<1, _private_bit_ref, _AP_W2, +// _private_range_ref<_AP_W2, +// _AP_S2> >( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), +// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// template +// INLINE ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref<_AP_W2, +// _AP_S2> > operator,( +// const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { +// return ap_concat_ref<1, _private_bit_ref, 1, +// _private_bit_ref<_AP_W2, _AP_S2> >( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), +// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); +// } +// +// INLINE ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref> +// operator,( +// const _private_bit_ref &a2) const { +// return ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref>( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), +// const_cast<_private_bit_ref&>(a2)); +// } +// +// template +// INLINE 
ap_concat_ref<1, _private_bit_ref, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > +// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { +// return ap_concat_ref<1, _private_bit_ref, _AP_W2 + _AP_W3, +// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), +// const_cast&>(a2)); +// } +// +// template +// INLINE ap_concat_ref< +// 1, _private_bit_ref, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > +// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, +// _AP_N2> +// &a2) const { +// return ap_concat_ref< +// 1, _private_bit_ref, _AP_W2, +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), +// const_cast< +// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, +// _AP_N2>&>(a2)); +// } +// +// template +// INLINE +// ap_concat_ref<1, _private_bit_ref, 1, +// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, +// _AP_N2> > +// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, +// _AP_N2> +// &a2) const { +// return ap_concat_ref<1, _private_bit_ref, 1, af_bit_ref<_AP_W2, +// _AP_I2, _AP_S2, +// _AP_Q2, _AP_O2, +// _AP_N2> >( +// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), +// const_cast&>( +// a2)); +// } + + template + INLINE bool operator==(const _private_bit_ref<_AP_W2, _AP_S2>& op) const { + return get() == op.get(); + } + + template + INLINE bool operator!=(const _private_bit_ref<_AP_W2, _AP_S2>& op) const { + return get() != op.get(); + } + + INLINE bool get() const { return operator bool(); } + + // template + // INLINE void set(const ap_private<_AP_W3, false>& val) { + // operator=(val); + // } + + // INLINE bool operator~() const { + // bool bit = (d_bv)[d_index]; + // return bit ? 
false : true; + // } + + INLINE int length() const { return 1; } + + // INLINE std::string to_string() const { + // bool val = get(); + // return val ? "1" : "0"; + // } + +}; // struct _private_bit_ref. + +// char a[100]; +// char* ptr = a; +// ap_int<2> n = 3; +// char* ptr2 = ptr + n*2; +// avoid ambiguous errors +#define OP_BIN_MIX_PTR(BIN_OP) \ + template \ + INLINE PTR_TYPE* operator BIN_OP(PTR_TYPE* i_op, \ + const ap_private<_AP_W, _AP_S>& op) { \ + typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ + return i_op BIN_OP op2; \ + } \ + template \ + INLINE PTR_TYPE* operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, \ + PTR_TYPE* i_op) { \ + typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ + return op2 BIN_OP i_op; \ + } + +OP_BIN_MIX_PTR(+) +OP_BIN_MIX_PTR(-) +#undef OP_BIN_MIX_PTR + +// float OP ap_int +// when ap_int's width > 64, then trunc ap_int to ap_int<64> +#define OP_BIN_MIX_FLOAT(BIN_OP, C_TYPE) \ + template \ + INLINE C_TYPE operator BIN_OP(C_TYPE i_op, \ + const ap_private<_AP_W, _AP_S>& op) { \ + typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ + return i_op BIN_OP op2; \ + } \ + template \ + INLINE C_TYPE operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, \ + C_TYPE i_op) { \ + typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ + return op2 BIN_OP i_op; \ + } + +#define OPS_MIX_FLOAT(C_TYPE) \ + OP_BIN_MIX_FLOAT(*, C_TYPE) \ + OP_BIN_MIX_FLOAT(/, C_TYPE) \ + OP_BIN_MIX_FLOAT(+, C_TYPE) \ + OP_BIN_MIX_FLOAT(-, C_TYPE) + +OPS_MIX_FLOAT(float) +OPS_MIX_FLOAT(double) +#undef OP_BIN_MIX_FLOAT +#undef OPS_MIX_FLOAT + +/// Operators mixing Integers with AP_Int +// ---------------------------------------------------------------- + +// partially specialize template argument _AP_C in order that: +// for _AP_W > 64, we will explicitly convert operand with native data type +// into corresponding ap_private +// for _AP_W <= 64, we will implicitly convert operand with ap_private into +// (unsigned) long long +#define 
OP_BIN_MIX_INT(BIN_OP, C_TYPE, _AP_WI, _AP_SI, RTYPE) \ + template \ + INLINE \ + typename ap_private<_AP_WI, _AP_SI>::template RType<_AP_W, _AP_S>::RTYPE \ + operator BIN_OP(C_TYPE i_op, const ap_private<_AP_W, _AP_S>& op) { \ + return ap_private<_AP_WI, _AP_SI>(i_op).operator BIN_OP(op); \ + } \ + template \ + INLINE \ + typename ap_private<_AP_W, _AP_S>::template RType<_AP_WI, _AP_SI>::RTYPE \ + operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, C_TYPE i_op) { \ + return op.operator BIN_OP(ap_private<_AP_WI, _AP_SI>(i_op)); \ + } + +#define OP_REL_MIX_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE bool operator REL_OP(const ap_private<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return op.operator REL_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ + } \ + template \ + INLINE bool operator REL_OP(C_TYPE op2, \ + const ap_private<_AP_W, _AP_S, false>& op) { \ + return ap_private<_AP_W2, _AP_S2>(op2).operator REL_OP(op); \ + } + +#define OP_ASSIGN_MIX_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ + ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ + return op.operator ASSIGN_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ + } + +#define OP_BIN_SHIFT_INT(BIN_OP, C_TYPE, _AP_WI, _AP_SI, RTYPE) \ + template \ + C_TYPE operator BIN_OP(C_TYPE i_op, \ + const ap_private<_AP_W, _AP_S, false>& op) { \ + return i_op BIN_OP(op.get_VAL()); \ + } \ + template \ + INLINE \ + typename ap_private<_AP_W, _AP_S>::template RType<_AP_WI, _AP_SI>::RTYPE \ + operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, C_TYPE i_op) { \ + return op.operator BIN_OP(i_op); \ + } + +#define OP_ASSIGN_RSHIFT_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ + ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ + op = op.operator>>(op2); \ + return op; \ + } + +#define OP_ASSIGN_LSHIFT_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ 
+ ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ + op = op.operator<<(op2); \ + return op; \ + } + +#define OPS_MIX_INT(C_TYPE, _AP_W2, _AP_S2) \ + OP_BIN_MIX_INT(*, C_TYPE, (_AP_W2), (_AP_S2), mult) \ + OP_BIN_MIX_INT(+, C_TYPE, (_AP_W2), (_AP_S2), plus) \ + OP_BIN_MIX_INT(-, C_TYPE, (_AP_W2), (_AP_S2), minus) \ + OP_BIN_MIX_INT(/, C_TYPE, (_AP_W2), (_AP_S2), div) \ + OP_BIN_MIX_INT(%, C_TYPE, (_AP_W2), (_AP_S2), mod) \ + OP_BIN_MIX_INT(&, C_TYPE, (_AP_W2), (_AP_S2), logic) \ + OP_BIN_MIX_INT(|, C_TYPE, (_AP_W2), (_AP_S2), logic) \ + OP_BIN_MIX_INT (^, C_TYPE, (_AP_W2), (_AP_S2), logic) \ + OP_BIN_SHIFT_INT(>>, C_TYPE, (_AP_W2), (_AP_S2), arg1) \ + OP_BIN_SHIFT_INT(<<, C_TYPE, (_AP_W2), (_AP_S2), arg1) \ + \ + OP_ASSIGN_MIX_INT(+=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(-=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(*=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(/=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(%=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(&=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(|=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_MIX_INT(^=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_RSHIFT_INT(>>=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_ASSIGN_LSHIFT_INT(<<=, C_TYPE, (_AP_W2), (_AP_S2)) \ + \ + OP_REL_MIX_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_REL_MIX_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_REL_MIX_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_REL_MIX_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_REL_MIX_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ + OP_REL_MIX_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) + +OPS_MIX_INT(bool, 1, false) +OPS_MIX_INT(char, 8, CHAR_IS_SIGNED) +OPS_MIX_INT(signed char, 8, true) +OPS_MIX_INT(unsigned char, 8, false) +OPS_MIX_INT(short, sizeof(short) * 8, true) +OPS_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false) +OPS_MIX_INT(int, sizeof(int) * 8, true) +OPS_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false) +OPS_MIX_INT(long, sizeof(long) * 8, true) +OPS_MIX_INT(unsigned long, 
sizeof(unsigned long) * 8, false) +OPS_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true) +OPS_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false) + +#undef OP_BIN_MIX_INT +#undef OP_BIN_SHIFT_INT +#undef OP_ASSIGN_MIX_INT +#undef OP_ASSIGN_RSHIFT_INT +#undef OP_ASSIGN_LSHIFT_INT +#undef OP_REL_MIX_INT +#undef OPS_MIX_INT + +#define OP_BIN_MIX_RANGE(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(const _private_range_ref<_AP_W1, _AP_S1>& op1, \ + const ap_private<_AP_W2, _AP_S2>& op2) { \ + return ap_private<_AP_W1, false>(op1).operator BIN_OP(op2); \ + } \ + template \ + INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<_AP_W2, \ + _AP_S2>::RTYPE \ + operator BIN_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ + const _private_range_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator BIN_OP(ap_private<_AP_W2, false>(op2)); \ + } + +#define OP_ASSIGN_MIX_RANGE(ASSIGN_OP) \ + template \ + INLINE ap_private<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + ap_private<_AP_W1, _AP_S1>& op1, \ + const _private_range_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator ASSIGN_OP(ap_private<_AP_W2, false>(op2)); \ + } \ + template \ + INLINE _private_range_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + _private_range_ref<_AP_W1, _AP_S1>& op1, \ + ap_private<_AP_W2, _AP_S2>& op2) { \ + ap_private<_AP_W1, false> tmp(op1); \ + tmp.operator ASSIGN_OP(op2); \ + op1 = tmp; \ + return op1; \ + } + +#define OP_REL_MIX_RANGE(REL_OP) \ + template \ + INLINE bool operator REL_OP(const _private_range_ref<_AP_W1, _AP_S1>& op1, \ + const ap_private<_AP_W2, _AP_S2>& op2) { \ + return ap_private<_AP_W1, false>(op1).operator REL_OP(op2); \ + } \ + template \ + INLINE bool operator REL_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ + const _private_range_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator REL_OP(op2.operator ap_private<_AP_W2, false>()); \ + } + +OP_BIN_MIX_RANGE(+, plus) +OP_BIN_MIX_RANGE(-, minus) 
+OP_BIN_MIX_RANGE(*, mult) +OP_BIN_MIX_RANGE(/, div) +OP_BIN_MIX_RANGE(%, mod) +OP_BIN_MIX_RANGE(&, logic) +OP_BIN_MIX_RANGE(|, logic) +OP_BIN_MIX_RANGE(^, logic) +OP_BIN_MIX_RANGE(>>, arg1) +OP_BIN_MIX_RANGE(<<, arg1) +#undef OP_BIN_MIX_RANGE + +OP_ASSIGN_MIX_RANGE(+=) +OP_ASSIGN_MIX_RANGE(-=) +OP_ASSIGN_MIX_RANGE(*=) +OP_ASSIGN_MIX_RANGE(/=) +OP_ASSIGN_MIX_RANGE(%=) +OP_ASSIGN_MIX_RANGE(&=) +OP_ASSIGN_MIX_RANGE(|=) +OP_ASSIGN_MIX_RANGE(^=) +OP_ASSIGN_MIX_RANGE(>>=) +OP_ASSIGN_MIX_RANGE(<<=) +#undef OP_ASSIGN_MIX_RANGE + +OP_REL_MIX_RANGE(>) +OP_REL_MIX_RANGE(<) +OP_REL_MIX_RANGE(>=) +OP_REL_MIX_RANGE(<=) +OP_REL_MIX_RANGE(==) +OP_REL_MIX_RANGE(!=) +#undef OP_REL_MIX_RANGE + +#define OP_BIN_MIX_BIT(BIN_OP, RTYPE) \ + template \ + INLINE typename ap_private<1, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP(const _private_bit_ref<_AP_W1, _AP_S1>& op1, \ + const ap_private<_AP_W2, _AP_S2>& op2) { \ + return ap_private<1, false>(op1).operator BIN_OP(op2); \ + } \ + template \ + INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<1, false>::RTYPE \ + operator BIN_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ + const _private_bit_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator BIN_OP(ap_private<1, false>(op2)); \ + } + +#define OP_ASSIGN_MIX_BIT(ASSIGN_OP) \ + template \ + INLINE ap_private<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + ap_private<_AP_W1, _AP_S1>& op1, \ + _private_bit_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator ASSIGN_OP(ap_private<1, false>(op2)); \ + } \ + template \ + INLINE _private_bit_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ + _private_bit_ref<_AP_W1, _AP_S1>& op1, \ + ap_private<_AP_W2, _AP_S2>& op2) { \ + ap_private<1, false> tmp(op1); \ + tmp.operator ASSIGN_OP(op2); \ + op1 = tmp; \ + return op1; \ + } + +#define OP_REL_MIX_BIT(REL_OP) \ + template \ + INLINE bool operator REL_OP(const _private_bit_ref<_AP_W1, _AP_S1>& op1, \ + const ap_private<_AP_W2, _AP_S2>& op2) { \ + return ap_private<_AP_W1, 
false>(op1).operator REL_OP(op2); \ + } \ + template \ + INLINE bool operator REL_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ + const _private_bit_ref<_AP_W2, _AP_S2>& op2) { \ + return op1.operator REL_OP(ap_private<1, false>(op2)); \ + } + +OP_ASSIGN_MIX_BIT(+=) +OP_ASSIGN_MIX_BIT(-=) +OP_ASSIGN_MIX_BIT(*=) +OP_ASSIGN_MIX_BIT(/=) +OP_ASSIGN_MIX_BIT(%=) +OP_ASSIGN_MIX_BIT(&=) +OP_ASSIGN_MIX_BIT(|=) +OP_ASSIGN_MIX_BIT(^=) +OP_ASSIGN_MIX_BIT(>>=) +OP_ASSIGN_MIX_BIT(<<=) +#undef OP_ASSIGN_MIX_BIT + +OP_BIN_MIX_BIT(+, plus) +OP_BIN_MIX_BIT(-, minus) +OP_BIN_MIX_BIT(*, mult) +OP_BIN_MIX_BIT(/, div) +OP_BIN_MIX_BIT(%, mod) +OP_BIN_MIX_BIT(&, logic) +OP_BIN_MIX_BIT(|, logic) +OP_BIN_MIX_BIT(^, logic) +OP_BIN_MIX_BIT(>>, arg1) +OP_BIN_MIX_BIT(<<, arg1) +#undef OP_BIN_MIX_BIT + +OP_REL_MIX_BIT(>) +OP_REL_MIX_BIT(<) +OP_REL_MIX_BIT(<=) +OP_REL_MIX_BIT(>=) +OP_REL_MIX_BIT(==) +OP_REL_MIX_BIT(!=) +#undef OP_REL_MIX_BIT + +#define REF_REL_OP_MIX_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE bool operator REL_OP(const _private_range_ref<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return (ap_private<_AP_W, false>(op)) \ + . 
\ + operator REL_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ + } \ + template \ + INLINE bool operator REL_OP(C_TYPE op2, \ + const _private_range_ref<_AP_W, _AP_S>& op) { \ + return ap_private<_AP_W2, _AP_S2>(op2).operator REL_OP( \ + ap_private<_AP_W, false>(op)); \ + } \ + template \ + INLINE bool operator REL_OP(const _private_bit_ref<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return (bool(op))REL_OP op2; \ + } \ + template \ + INLINE bool operator REL_OP(C_TYPE op2, \ + const _private_bit_ref<_AP_W, _AP_S>& op) { \ + return op2 REL_OP(bool(op)); \ + } + +#define REF_REL_MIX_INT(C_TYPE, _AP_W2, _AP_S2) \ + REF_REL_OP_MIX_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_REL_OP_MIX_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_REL_OP_MIX_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_REL_OP_MIX_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_REL_OP_MIX_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_REL_OP_MIX_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) + +REF_REL_MIX_INT(bool, 1, false) +REF_REL_MIX_INT(char, 8, CHAR_IS_SIGNED) +REF_REL_MIX_INT(signed char, 8, true) +REF_REL_MIX_INT(unsigned char, 8, false) +REF_REL_MIX_INT(short, sizeof(short) * 8, true) +REF_REL_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false) +REF_REL_MIX_INT(int, sizeof(int) * 8, true) +REF_REL_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false) +REF_REL_MIX_INT(long, sizeof(long) * 8, true) +REF_REL_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false) +REF_REL_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true) +REF_REL_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false) +#undef REF_REL_OP_MIX_INT +#undef REF_REL_MIX_INT + +#define REF_BIN_OP_MIX_INT(BIN_OP, RTYPE, C_TYPE, _AP_W2, _AP_S2) \ + template \ + INLINE \ + typename ap_private<_AP_W, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ + operator BIN_OP(const _private_range_ref<_AP_W, _AP_S>& op, \ + C_TYPE op2) { \ + return (ap_private<_AP_W, false>(op)) \ + . 
\ + operator BIN_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ + } \ + template \ + INLINE \ + typename ap_private<_AP_W2, _AP_S2>::template RType<_AP_W, false>::RTYPE \ + operator BIN_OP(C_TYPE op2, \ + const _private_range_ref<_AP_W, _AP_S>& op) { \ + return ap_private<_AP_W2, _AP_S2>(op2).operator BIN_OP( \ + ap_private<_AP_W, false>(op)); \ + } + +#define REF_BIN_MIX_INT(C_TYPE, _AP_W2, _AP_S2) \ + REF_BIN_OP_MIX_INT(+, plus, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(-, minus, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(*, mult, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(/, div, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(%, mod, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(&, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(|, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(^, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(>>, arg1, C_TYPE, (_AP_W2), (_AP_S2)) \ + REF_BIN_OP_MIX_INT(<<, arg1, C_TYPE, (_AP_W2), (_AP_S2)) + +REF_BIN_MIX_INT(bool, 1, false) +REF_BIN_MIX_INT(char, 8, CHAR_IS_SIGNED) +REF_BIN_MIX_INT(signed char, 8, true) +REF_BIN_MIX_INT(unsigned char, 8, false) +REF_BIN_MIX_INT(short, sizeof(short) * 8, true) +REF_BIN_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false) +REF_BIN_MIX_INT(int, sizeof(int) * 8, true) +REF_BIN_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false) +REF_BIN_MIX_INT(long, sizeof(long) * 8, true) +REF_BIN_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false) +REF_BIN_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true) +REF_BIN_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false) +#undef REF_BIN_OP_MIX_INT +#undef REF_BIN_MIX_INT + +#define REF_BIN_OP(BIN_OP, RTYPE) \ + template \ + INLINE \ + typename ap_private<_AP_W, false>::template RType<_AP_W2, false>::RTYPE \ + operator BIN_OP(const _private_range_ref<_AP_W, _AP_S>& lhs, \ + const _private_range_ref<_AP_W2, _AP_S2>& rhs) { \ + return ap_private<_AP_W, false>(lhs).operator BIN_OP( \ + ap_private<_AP_W2, 
false>(rhs)); \ + } + +REF_BIN_OP(+, plus) +REF_BIN_OP(-, minus) +REF_BIN_OP(*, mult) +REF_BIN_OP(/, div) +REF_BIN_OP(%, mod) +REF_BIN_OP(&, logic) +REF_BIN_OP(|, logic) +REF_BIN_OP(^, logic) +REF_BIN_OP(>>, arg1) +REF_BIN_OP(<<, arg1) +#undef REF_BIN_OP + +//************************************************************************ +// Implement +// ap_private = ap_concat_ref OP ap_concat_ref +// for operators +, -, *, /, %, >>, <<, &, |, ^ +// Without these operators the operands are converted to int64 and +// larger results lose informations (higher order bits). +// +// operand OP +// / | +// left-concat right-concat +// / | / | +// +// +// _AP_LW1, _AP_LT1 (width and type of left-concat's left side) +// _AP_LW2, _AP_LT2 (width and type of left-concat's right side) +// Similarly for RHS of operand OP: _AP_RW1, AP_RW2, _AP_RT1, _AP_RT2 +// +// In Verilog 2001 result of concatenation is always unsigned even +// when both sides are signed. +//************************************************************************ + +#endif // ifndef __AP_PRIVATE_H__ + +// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/objdef.h b/TrigScint/include/TrigScint/objdef.h new file mode 100755 index 000000000..9b3b7ac74 --- /dev/null +++ b/TrigScint/include/TrigScint/objdef.h @@ -0,0 +1,96 @@ +#ifndef OBJDEF_H +#define OBJDEF_H +#include "TrigScint/ap_int.h" + +#define NTIMES 6 +#define NHITS 25 +#define NCLUS 25 +#define NCHAN 50 +#define NTRK 10 +#define W 10 + +#define NCENT 99 + +#define NDIGIS 14 +#define COMBO 9 + +//2*NCHAN*NTIMES are the number of bytes per event plus 4+4+4+3+1 bytes for the header + +#define NSAMPLES 6 + +//NSAMPLES/8 is the number of 64 bit words + +#define NWORDS 72 + +struct Digi { + int mID, bID; + int adc0, adc1, adc2, adc3, adc4, adc5; + int tdc0, tdc1, tdc2, tdc3, tdc4, tdc5; +}; +inline void clearDigi(Digi & c){ + c.mID=0;c.bID=0; + c.adc0=0;c.adc1=0;c.adc2=0;c.adc3=0;c.adc4=0;c.adc5=0; + c.tdc0=0;c.tdc1=0;c.tdc2=0;c.tdc3=0;c.tdc4=0;c.tdc5=0; +}
+struct Hit { + ap_int<12> mID, bID; + ap_int<12> Amp, Time; //TrigTime; +}; +inline void clearHit(Hit & c){ + c.mID=0; c.bID=-1; c.Amp=0; c.Time=0; //c.TrigTime=0.0; +} +inline void cpyHit(Hit & c1, Hit & c2){ + c1.mID=c2.mID;c1.bID=c2.bID;c1.Amp=c2.Amp;c1.Time=c2.Time; +} + +struct Cluster { + Hit Seed; Hit Sec; + ap_int<11> Cent; + //int nhits, mID, SeedID; + //float CentX, CentY, CentZ, Amp, Time, TrigTime; +}; +inline void clearClus(Cluster & c){ + clearHit(c.Seed);clearHit(c.Sec);c.Cent = (ap_int<11>)(0);//clearHit(c.For); +} +inline void calcCent(Cluster & c){ + if(c.Seed.Amp>0){ + c.Cent = (ap_int<12>)(10.*((float)(c.Seed.Amp*c.Seed.bID+c.Sec.Amp*c.Sec.bID))/((float)(c.Seed.Amp+c.Sec.Amp))); + }else{ + c.Cent=(ap_int<12>)(0); + } +} +inline void cpyCluster(Cluster & c1, Cluster & c2){ + cpyHit(c1.Seed,c2.Seed);cpyHit(c1.Sec,c2.Sec); +} + +struct Track { + Cluster Pad1; Cluster Pad2; Cluster Pad3; + ap_int<12> resid; +}; +inline void clearTrack(Track & c){ + clearClus(c.Pad1);clearClus(c.Pad2);clearClus(c.Pad3); + c.resid=5000; +} +inline ap_int<12> calcTCent(Track & c){ + calcCent(c.Pad1);calcCent(c.Pad2);calcCent(c.Pad3); + float one = (float)c.Pad1.Cent; + float two = (float)c.Pad2.Cent; + float three = (float)c.Pad3.Cent; + float mean = (one+two+three)/3.0; + ap_int<12> Cent = (ap_int<10>)((int)(mean)); + return Cent; +} +inline void calcResid(Track & c){ + calcCent(c.Pad1);calcCent(c.Pad2);calcCent(c.Pad3); + float one = (float)c.Pad1.Cent; + float two = (float)c.Pad2.Cent; + float three = (float)c.Pad3.Cent; + float mean = (one+two+three)/3.0; + c.resid = (ap_int<12>)((int)(((one-mean)*(one-mean)+(two-mean)*(two-mean)+(three-mean)*(three-mean))/3.0)); +} +inline void cpyTrack(Track & c1, Track & c2){ + cpyCluster(c1.Pad1,c2.Pad1);cpyCluster(c1.Pad2,c2.Pad2);cpyCluster(c1.Pad3,c2.Pad3); + c1.resid=c2.resid; +} + +#endif diff --git a/TrigScint/include/TrigScint/testutils.h b/TrigScint/include/TrigScint/testutils.h new file mode 100755 index 
000000000..e64a282fc --- /dev/null +++ b/TrigScint/include/TrigScint/testutils.h @@ -0,0 +1,16 @@ +#ifndef TESTUTILS_H +#define TESTUTILS_H +#include "objdef.h" + +bool compareHit(Hit Hit1, Hit Hit2){ + return ((Hit1.mID==Hit2.mID)and(Hit1.bID==Hit2.bID)and(Hit1.Amp==Hit2.Amp)and(Hit1.Time==Hit2.Time));//and(Hit1.TrigTime==Hit2.TrigTime)); +} + +bool compareClus(Cluster clus1[NHITS], Cluster clus2[NHITS]){ + for(int i = 0; i lookup[NCENT][COMBO][2]); +void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS],Track outTrk[NTRK], ap_int<12> lookup[NCENT][COMBO][2]); + +#endif diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx new file mode 100644 index 000000000..ec4f56ba1 --- /dev/null +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -0,0 +1,290 @@ + +#include "TrigScint/TrigScintFirmwareTracker.h" +#include "TrigScint/trackproducer.h" +#include "TrigScint/clusterproducer.h" +#include "TrigScint/objdef.h" +#include // std::next +#include + +namespace trigscint { + +void TrigScintFirmwareTracker::configure(framework::config::Parameters &ps) { + minThr_ = ps.getParameter("clustering_threshold"); + digis1_collection_ = ps.getParameter("digis1_collection"); + digis2_collection_ = ps.getParameter("digis2_collection"); + digis3_collection_ = ps.getParameter("digis3_collection"); + passName_ = ps.getParameter("input_pass_name"); + output_collection_ = ps.getParameter("output_collection"); + verbose_ = ps.getParameter("verbosity"); + timeTolerance_ = ps.getParameter("time_tolerance"); + padTime_ = ps.getParameter("pad_time"); + if (verbose_) { + ldmx_log(info) << "In TrigScintFirmwareTracker: configure done!"; + ldmx_log(info) << "\nClustering threshold: " << minThr_ + << "\nExpected pad hit time: " << padTime_ + << "\nMax hit time delay: " << timeTolerance_ + << "\ndigis1 collection: " << digis1_collection_ + << "\ndigis2 collection: " << digis2_collection_ + << 
"\ndigis3 collection: " << digis3_collection_ + << "\nInput pass name: " << passName_ + << "\nOutput collection: " << output_collection_ + << "\nVerbosity: " << verbose_; + } + + return; +} + +void TrigScintFirmwareTracker::produce(framework::Event &event) { + + if (verbose_) { + ldmx_log(debug) + << "TrigScintFirmwareTracker: produce() starts! Event number: " + << event.getEventHeader().getEventNumber(); + } + //I AM FILLING IN THE TRACKING LUT FOR LATER USE + ap_int<12> A[3]={0,0,0}; + ap_int<12> LOOKUP[NCENT][COMBO][2]; + for(int i = 0; i=0)and(LOOKUP[i][j][1]>=0)and(LOOKUP[i][j][0](digis1_collection_, passName_)}; + const auto digis3_{ + event.getCollection(digis2_collection_, passName_)}; + const auto digis2_{ + event.getCollection(digis3_collection_, passName_)}; + + std::cout<<"I GOT HERE 2"< + minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){//and(digi.getTime()>padTime_+timeTolerance_)) { + std::cout<<"I AM LOOPING IN DIGIS1 "< bID = (ap_int<12>)(digi.getBarID()); + ap_int<12> Amp = (ap_int<12>)(digi.getPE()); + int index=count; + if(occupied[(int)digi.getBarID()]>=0){ + if(HPad1[(int)occupied[(int)digi.getBarID()]].Amp)(digi.getBarID()); + HPad1[(int)occupied[(int)digi.getBarID()]].mID=(ap_int<12>)(digi.getModuleID()); + HPad1[(int)occupied[(int)digi.getBarID()]].Amp=(ap_int<12>)(digi.getPE()); + HPad1[(int)occupied[(int)digi.getBarID()]].Time=(ap_int<12>)(digi.getTime()); + } + }else{ + HPad1[count].bID=(ap_int<12>)(digi.getBarID()); + std::cout<<(ap_int<12>)(digi.getBarID())<)(digi.getModuleID()); + HPad1[count].Amp=(ap_int<12>)(digi.getPE()); + HPad1[count].Time=(ap_int<12>)(digi.getTime()); + occupied[digi.getBarID()]=count; + count++; + } + } + } + + std::cout<<"I GOT HERE 3"< + minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){//and(digi.getTime()>padTime_+timeTolerance_)) { + ap_int<12> bID = (ap_int<12>)(digi.getBarID()); + ap_int<12> Amp = (ap_int<12>)(digi.getPE()); + int index=count; + if(occupied[(int)digi.getBarID()]>=0){ + 
if(HPad2[(int)occupied[(int)digi.getBarID()]].Amp)(digi.getBarID()); + HPad2[(int)occupied[(int)digi.getBarID()]].mID=(ap_int<12>)(digi.getModuleID()); + HPad2[(int)occupied[(int)digi.getBarID()]].Amp=(ap_int<12>)(digi.getPE()); + HPad2[(int)occupied[(int)digi.getBarID()]].Time=(ap_int<12>)(digi.getTime()); + } + }else{ + HPad2[count].bID=(ap_int<12>)(digi.getBarID()); + HPad2[count].mID=(ap_int<12>)(digi.getModuleID()); + HPad2[count].Amp=(ap_int<12>)(digi.getPE()); + HPad2[count].Time=(ap_int<12>)(digi.getTime()); + occupied[digi.getBarID()]=count; + count++; + } + } + } + //std::cout<<"I GOT HERE 4"< + minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){//and(digi.getTime()>padTime_+timeTolerance_)) { + ap_int<12> bID = (ap_int<12>)(digi.getBarID()); + ap_int<12> Amp = (ap_int<12>)(digi.getPE()); + int index=count; + if(occupied[(int)digi.getBarID()]>=0){ + if(HPad3[(int)occupied[(int)digi.getBarID()]].Amp)(digi.getBarID()); + HPad3[(int)occupied[(int)digi.getBarID()]].mID=(ap_int<12>)(digi.getModuleID()); + HPad3[(int)occupied[(int)digi.getBarID()]].Amp=(ap_int<12>)(digi.getPE()); + HPad3[(int)occupied[(int)digi.getBarID()]].Time=(ap_int<12>)(digi.getTime()); + } + }else{ + HPad3[count].bID=(ap_int<12>)(digi.getBarID()); + HPad3[count].mID=(ap_int<12>)(digi.getModuleID()); + HPad3[count].Amp=(ap_int<12>)(digi.getPE()); + HPad3[count].Time=(ap_int<12>)(digi.getTime()); + occupied[digi.getBarID()]=count; + count++; + } + } + } + count=0; + std::cout<<"GOT HERE"<30)and(Point1[i].Seed.bID<51)and(Point1[i].Seed.bID>=0)and(Point1[i].Sec.Amp<450)and(counterN=topSeed){ + cpyHit(Pad1[counterN].Seed,Point1[i].Seed);cpyHit(Pad1[counterN].Sec,Point1[i].Sec); + calcCent(Pad1[counterN]); + counterN++; + topSeed=Point1[i].Seed.bID; + } + } + } + Cluster* Point2=clusterproducer_sw(HPad2); + topSeed=0; + for(int i = 0; i30)and(Point2[i].Seed.bID<51)and(Point2[i].Seed.bID>=0)and(Point2[i].Sec.Amp<450)){ + if(Point2[i].Seed.bID>=topSeed){ + 
std::cout<30)and(Point3[i].Seed.bID<51)and(Point3[i].Seed.bID>=0)and(Point3[i].Sec.Amp<450)){ + std::cout<<"Top Seed "<=topSeed){ + std::cout<0){ + std::cout< +#include +#include "TrigScint/objdef.h" +#include "TrigScint/clusterproducer.h" + +Cluster* clusterproducer_sw(Hit inHit[NHITS]){ + + ap_int<12> SEEDTHR = 30; + ap_int<12> CLUSTHR = 30; + + ap_int<12> mapL1[NCHAN]; + + Cluster* outClus = new Cluster[NCLUS]; + + //CLEAR THE MAP + for(int i = 0;i-1){ + std::cout<<"Step2: "<-1)){ + if(inHit[mapL1[2*k]].Amp>SEEDTHR){ + clearClus(outClus[k]); + outClus[k].Seed.mID=inHit[mapL1[2*k]].mID; outClus[k].Seed.bID=inHit[mapL1[2*k]].bID; outClus[k].Seed.Amp=inHit[mapL1[2*k]].Amp; outClus[k].Seed.Time=inHit[mapL1[2*k]].Time; + std::cout<<"I AM DOING CLUSTERS SEED: "<-1){ + if(inHit[mapL1[2*k+1]].Amp>CLUSTHR){ + outClus[k].Sec.mID=inHit[mapL1[2*k+1]].mID; outClus[k].Sec.bID=inHit[mapL1[2*k+1]].bID; outClus[k].Sec.Amp=inHit[mapL1[2*k+1]].Amp; outClus[k].Sec.Time=inHit[mapL1[2*k+1]].Time; + doNextCluster=false; + //You can comment this line to turn it into Serialized + clearHit(inHit[mapL1[2*k+1]]); + + } + } + } + } + if((mapL1[2*k+1]>-1)and(doNextCluster)){ + if(inHit[mapL1[2*k+1]].Amp>SEEDTHR){ + clearClus(outClus[k]); + outClus[k].Seed.mID=inHit[mapL1[2*k+1]].mID; outClus[k].Seed.bID=inHit[mapL1[2*k+1]].bID; outClus[k].Seed.Amp=inHit[mapL1[2*k+1]].Amp; outClus[k].Seed.Time=inHit[mapL1[2*k+1]].Time; + std::cout<<"I AM DOING CLUSTERS SEED: "<-1){ + if(inHit[mapL1[2*k+2]].Amp>CLUSTHR){ + outClus[k].Sec.mID=inHit[mapL1[2*k+2]].mID; outClus[k].Sec.bID=inHit[mapL1[2*k+2]].bID; outClus[k].Sec.Amp=inHit[mapL1[2*k+2]].Amp; outClus[k].Sec.Time=inHit[mapL1[2*k+2]].Time; + //You can comment this line to turn it into Serialized + clearHit(inHit[mapL1[2*k+2]]); + } + } + } + } + } + //GOING TO TRY FOR KICKS AND SHITS; IT SWAPS THE SEED AND SEC IF SEC IS LARGER + //if(outClus[k].Sec.Amp>outClus[k].Seed.Amp){ + //ap_int<12> holder1=outClus[k].Sec.Amp; + //ap_int<12> 
holder2=outClus[k].Sec.bID; + //outClus[k].Sec.bID=outClus[k].Seed.bID; + //outClus[k].Sec.Amp=outClus[k].Seed.Amp; + //outClus[k].Seed.bID=holder1; + //outClus[k].Seed.bID=holder2; + //} + } + + return outClus; +} + diff --git a/TrigScint/src/TrigScint/trackproducer_hw.cxx b/TrigScint/src/TrigScint/trackproducer_hw.cxx new file mode 100755 index 000000000..c962dd5f7 --- /dev/null +++ b/TrigScint/src/TrigScint/trackproducer_hw.cxx @@ -0,0 +1,98 @@ +#include +#include +#include "TrigScint/objdef.h" +#include "TrigScint/trackproducer.h" + + +void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS],Track outTrk[NTRK],ap_int<12> lookup[NCENT][COMBO][2]){ + #pragma HLS ARRAY_PARTITION variable=Pad1 dim=0 complete + #pragma HLS ARRAY_PARTITION variable=Pad2 dim=0 complete + #pragma HLS ARRAY_PARTITION variable=Pad3 dim=0 complete + #pragma HLS ARRAY_PARTITION variable=outTrk dim=0 complete + #pragma HLS ARRAY_PARTITION variable=lookup dim=0 complete + #pragma HLS PIPELINE II=10 + std::cout<<"HELLO 2 HERE 1"<Pad1[i].Seed.Amp){ + // Pad1[i].Seed.bID=Pad1[i].Sec.bID; + //} + std::cout<<"Pad1 Amp and 2 bID: "<100){continue;} + for(int I = 0;I0)){continue;}//Continue if Seed not Satisfied + ap_int<12> centroid = 2*Pad1[i].Seed.bID; + if(Pad1[i].Sec.Amp>0){ + centroid+=1; + } + std::cout<<"Pad2 Amp and 2 bID: "<0)){continue;}//Continue if Seed not Satisfied + std::cout<<"HELLO 2 HERE 2.14"<=0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==1))){continue;}//Continue if Sec is not Expected, and not Empty + std::cout<<"HELLO 2 HERE 2.15"<=0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==0))){continue;}//Continue if Sec is not Expected, and not Empty + std::cout<<"HELLO 2 HERE 2.17"<0)){continue;}//Continue if Seed not Satisfied + std::cout<<"HELLO 2 HERE 2.2"<=0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==1))){continue;}//Continue if Sec is not Expected, and not Empty + std::cout<<"HELLO 2 HERE 
2.21"<=0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==0))){continue;}//Continue if Sec is not Expected, and not Empty + std::cout<<"HELLO 2 HERE 2.23"<=0)){ + if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} + } + if((outTrk[i].Pad2.Seed.bID==outTrk[i+1].Pad2.Seed.bID)and(outTrk[i+1].Pad2.Seed.bID>=0)){ + if(outTrk[i+1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i+1]);} + } + if((outTrk[i-1].Pad2.Seed.bID==outTrk[i+1].Pad2.Seed.bID)and(outTrk[i+1].Pad2.Seed.bID>=0)){ + if(outTrk[i-1].resid<=outTrk[i+1].resid){clearTrack(outTrk[i+1]);}else{clearTrack(outTrk[i-1]);} + } + } + //GETS RID OF SHARED CLUSTERS IN THE THIRD PAD + for(int i = 1;i=0)){ + if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} + } + if((outTrk[i].Pad3.Seed.bID==outTrk[i+1].Pad3.Seed.bID)and(outTrk[i+1].Pad3.Seed.bID>=0)){ + if(outTrk[i+1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i+1]);} + } + if((outTrk[i-1].Pad3.Seed.bID==outTrk[i+1].Pad3.Seed.bID)and(outTrk[i+1].Pad3.Seed.bID>=0)){ + if(outTrk[i-1].resid<=outTrk[i+1].resid){clearTrack(outTrk[i+1]);}else{clearTrack(outTrk[i-1]);} + } + } + return; +} From 6fd09bd885d5dd36ecc0c26371a0b9f1f098e5dc Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 22:08:14 -0700 Subject: [PATCH 03/19] Update TrigScint/include/TrigScint/TrigScintFirmwareTracker.h Co-authored-by: Tamas Vami --- TrigScint/include/TrigScint/TrigScintFirmwareTracker.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h index c06ce0743..70381f50a 100644 --- a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h +++ b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h @@ -38,9 +38,6 @@ class TrigScintFirmwareTracker : public framework::Producer { * add a hit at index idx to a cluster */ - void onProcessStart() 
override; - - void onProcessEnd() override; private: // collection of clusters produced From cc149760c1cd02c8b58ca89caa25253f83e365e7 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 22:08:26 -0700 Subject: [PATCH 04/19] Update TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx Co-authored-by: Tamas Vami --- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index ec4f56ba1..9837145a3 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -273,18 +273,6 @@ ldmx::TrigScintTrack TrigScintFirmwareTracker::makeTrack(Track outTrk) { return tr; } -void TrigScintFirmwareTracker::onProcessStart() { - ldmx_log(debug) << "Process starts!"; - - return; -} - -void TrigScintFirmwareTracker::onProcessEnd() { - ldmx_log(debug) << "Process ends!"; - - return; -} - } // namespace trigscint DECLARE_PRODUCER_NS(trigscint, TrigScintFirmwareTracker); From 8f5ad74b0a9c2d2fe220a710b8cd370b69292f7d Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 22:08:54 -0700 Subject: [PATCH 05/19] Update TrigScint/exampleConfigs/firmwareEx.py Co-authored-by: Tamas Vami --- TrigScint/exampleConfigs/firmwareEx.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/TrigScint/exampleConfigs/firmwareEx.py b/TrigScint/exampleConfigs/firmwareEx.py index a207e7521..2eb594935 100644 --- a/TrigScint/exampleConfigs/firmwareEx.py +++ b/TrigScint/exampleConfigs/firmwareEx.py @@ -130,10 +130,6 @@ tsClustersUp =TrigScintClusterProducer.pad1() tsClustersDown =TrigScintClusterProducer.pad3() -if "v12" in version : - tsClustersTag.pad_time = -2. - tsClustersUp.pad_time = 0. - tsClustersDown.pad_time = 0. 
tsDigisUp.verbosity=0 tsClustersUp.verbosity=1 From 2d3820f4909e63e13f0cb8d8496912fbb7bf1680 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 22:09:29 -0700 Subject: [PATCH 06/19] Update TrigScint/exampleConfigs/firmwareEx.py Co-authored-by: Tamas Vami --- TrigScint/exampleConfigs/firmwareEx.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/TrigScint/exampleConfigs/firmwareEx.py b/TrigScint/exampleConfigs/firmwareEx.py index 2eb594935..c8e41ab18 100644 --- a/TrigScint/exampleConfigs/firmwareEx.py +++ b/TrigScint/exampleConfigs/firmwareEx.py @@ -98,10 +98,7 @@ from LDMX.TrigScint.trigScint import trigScintTrack from LDMX.TrigScint.trigScint import TrigScintFirmwareTracker -if "v12" in version : - tsSimColls=[ "TriggerPadTagSimHits", "TriggerPadUpSimHits", "TriggerPadDnSimHits" ] -else : - tsSimColls=[ "TriggerPad2SimHits", "TriggerPad3SimHits", "TriggerPad1SimHits" ] +tsSimColls=[ "TriggerPad2SimHits", "TriggerPad3SimHits", "TriggerPad1SimHits" ] # ecal digi chain # ecalDigi =eDigi.EcalDigiProducer('EcalDigis') From a6bf860c74ce6b89bc5c9239fbb6f8a03689ebbd Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 22:33:07 -0700 Subject: [PATCH 07/19] Commiting removal of print out statements --- .../TrigScint/TrigScintFirmwareTracker.cxx | 37 ++----------------- .../src/TrigScint/clusterproducer_sw.cxx | 15 -------- TrigScint/src/TrigScint/trackproducer_hw.cxx | 27 -------------- 3 files changed, 4 insertions(+), 75 deletions(-) diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 9837145a3..91814b514 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -3,7 +3,7 @@ #include "TrigScint/trackproducer.h" #include "TrigScint/clusterproducer.h" #include "TrigScint/objdef.h" -#include // std::next +#include #include namespace trigscint { @@ -89,7 +89,6 @@ void 
TrigScintFirmwareTracker::produce(framework::Event &event) { clearTrack(outTrk[j]); } - //std::cout<<"I GOT HERE 1"<(digis1_collection_, passName_)}; const auto digis3_{ @@ -97,19 +96,14 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { const auto digis2_{ event.getCollection(digis3_collection_, passName_)}; - std::cout<<"I GOT HERE 2"< - minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){//and(digi.getTime()>padTime_+timeTolerance_)) { - std::cout<<"I AM LOOPING IN DIGIS1 "<=0)){ ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); int index=count; @@ -122,7 +116,6 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } }else{ HPad1[count].bID=(ap_int<12>)(digi.getBarID()); - std::cout<<(ap_int<12>)(digi.getBarID())<)(digi.getModuleID()); HPad1[count].Amp=(ap_int<12>)(digi.getPE()); HPad1[count].Time=(ap_int<12>)(digi.getTime()); @@ -132,14 +125,13 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } - std::cout<<"I GOT HERE 3"< - minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){//and(digi.getTime()>padTime_+timeTolerance_)) { + minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){ ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); int index=count; @@ -160,14 +152,13 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - //std::cout<<"I GOT HERE 4"< - minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){//and(digi.getTime()>padTime_+timeTolerance_)) { + minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){ ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); int index=count; @@ -189,22 +180,12 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } count=0; - std::cout<<"GOT HERE"<30)and(Point1[i].Seed.bID<51)and(Point1[i].Seed.bID>=0)and(Point1[i].Sec.Amp<450)and(counterN=topSeed){ 
cpyHit(Pad1[counterN].Seed,Point1[i].Seed);cpyHit(Pad1[counterN].Sec,Point1[i].Sec); calcCent(Pad1[counterN]); @@ -216,11 +197,8 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { Cluster* Point2=clusterproducer_sw(HPad2); topSeed=0; for(int i = 0; i30)and(Point2[i].Seed.bID<51)and(Point2[i].Seed.bID>=0)and(Point2[i].Sec.Amp<450)){ if(Point2[i].Seed.bID>=topSeed){ - std::cout<30)and(Point3[i].Seed.bID<51)and(Point3[i].Seed.bID>=0)and(Point3[i].Sec.Amp<450)){ - std::cout<<"Top Seed "<=topSeed){ - std::cout<0){ - std::cout<-1){ - std::cout<<"Step2: "<SEEDTHR){ clearClus(outClus[k]); outClus[k].Seed.mID=inHit[mapL1[2*k]].mID; outClus[k].Seed.bID=inHit[mapL1[2*k]].bID; outClus[k].Seed.Amp=inHit[mapL1[2*k]].Amp; outClus[k].Seed.Time=inHit[mapL1[2*k]].Time; - std::cout<<"I AM DOING CLUSTERS SEED: "<-1){ if(inHit[mapL1[2*k+1]].Amp>CLUSTHR){ outClus[k].Sec.mID=inHit[mapL1[2*k+1]].mID; outClus[k].Sec.bID=inHit[mapL1[2*k+1]].bID; outClus[k].Sec.Amp=inHit[mapL1[2*k+1]].Amp; outClus[k].Sec.Time=inHit[mapL1[2*k+1]].Time; @@ -48,8 +44,6 @@ Cluster* clusterproducer_sw(Hit inHit[NHITS]){ if(inHit[mapL1[2*k+1]].Amp>SEEDTHR){ clearClus(outClus[k]); outClus[k].Seed.mID=inHit[mapL1[2*k+1]].mID; outClus[k].Seed.bID=inHit[mapL1[2*k+1]].bID; outClus[k].Seed.Amp=inHit[mapL1[2*k+1]].Amp; outClus[k].Seed.Time=inHit[mapL1[2*k+1]].Time; - std::cout<<"I AM DOING CLUSTERS SEED: "<-1){ if(inHit[mapL1[2*k+2]].Amp>CLUSTHR){ @@ -61,15 +55,6 @@ Cluster* clusterproducer_sw(Hit inHit[NHITS]){ } } } - //GOING TO TRY FOR KICKS AND SHITS; IT SWAPS THE SEED AND SEC IF SEC IS LARGER - //if(outClus[k].Sec.Amp>outClus[k].Seed.Amp){ - //ap_int<12> holder1=outClus[k].Sec.Amp; - //ap_int<12> holder2=outClus[k].Sec.bID; - //outClus[k].Sec.bID=outClus[k].Seed.bID; - //outClus[k].Sec.Amp=outClus[k].Seed.Amp; - //outClus[k].Seed.bID=holder1; - //outClus[k].Seed.bID=holder2; - //} } return outClus; diff --git a/TrigScint/src/TrigScint/trackproducer_hw.cxx 
b/TrigScint/src/TrigScint/trackproducer_hw.cxx index c962dd5f7..39de952c7 100755 --- a/TrigScint/src/TrigScint/trackproducer_hw.cxx +++ b/TrigScint/src/TrigScint/trackproducer_hw.cxx @@ -11,65 +11,38 @@ void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS] #pragma HLS ARRAY_PARTITION variable=outTrk dim=0 complete #pragma HLS ARRAY_PARTITION variable=lookup dim=0 complete #pragma HLS PIPELINE II=10 - std::cout<<"HELLO 2 HERE 1"<Pad1[i].Seed.Amp){ - // Pad1[i].Seed.bID=Pad1[i].Sec.bID; - //} - std::cout<<"Pad1 Amp and 2 bID: "<100){continue;} for(int I = 0;I0)){continue;}//Continue if Seed not Satisfied ap_int<12> centroid = 2*Pad1[i].Seed.bID; if(Pad1[i].Sec.Amp>0){ centroid+=1; } - std::cout<<"Pad2 Amp and 2 bID: "<0)){continue;}//Continue if Seed not Satisfied - std::cout<<"HELLO 2 HERE 2.14"<=0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==1))){continue;}//Continue if Sec is not Expected, and not Empty - std::cout<<"HELLO 2 HERE 2.15"<=0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==0))){continue;}//Continue if Sec is not Expected, and not Empty - std::cout<<"HELLO 2 HERE 2.17"<0)){continue;}//Continue if Seed not Satisfied - std::cout<<"HELLO 2 HERE 2.2"<=0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==1))){continue;}//Continue if Sec is not Expected, and not Empty - std::cout<<"HELLO 2 HERE 2.21"<=0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==0))){continue;}//Continue if Sec is not Expected, and not Empty - std::cout<<"HELLO 2 HERE 2.23"<=0)){ if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} From 7e94f215ef6b64c46b0eedc6e47c11d46958b483 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 23:34:18 -0700 Subject: [PATCH 08/19] Commiting removal of superfluous firmware ap int libraries --- TrigScint/include/TrigScint/ap_common.h | 376 --- TrigScint/include/TrigScint/ap_decl.h | 212 -- TrigScint/include/TrigScint/ap_fixed.h | 360 --- TrigScint/include/TrigScint/ap_fixed_base.h | 
2354 ----------------- TrigScint/include/TrigScint/ap_fixed_ref.h | 718 ----- .../include/TrigScint/ap_fixed_special.h | 230 -- TrigScint/include/TrigScint/ap_int.h | 330 --- TrigScint/include/TrigScint/ap_int_base.h | 1885 ------------- TrigScint/include/TrigScint/ap_int_ref.h | 1346 ---------- TrigScint/include/TrigScint/ap_int_special.h | 223 -- TrigScint/include/TrigScint/objdef.h | 2 +- 11 files changed, 1 insertion(+), 8035 deletions(-) delete mode 100644 TrigScint/include/TrigScint/ap_common.h delete mode 100644 TrigScint/include/TrigScint/ap_decl.h delete mode 100644 TrigScint/include/TrigScint/ap_fixed.h delete mode 100644 TrigScint/include/TrigScint/ap_fixed_base.h delete mode 100644 TrigScint/include/TrigScint/ap_fixed_ref.h delete mode 100644 TrigScint/include/TrigScint/ap_fixed_special.h delete mode 100644 TrigScint/include/TrigScint/ap_int.h delete mode 100644 TrigScint/include/TrigScint/ap_int_base.h delete mode 100644 TrigScint/include/TrigScint/ap_int_ref.h delete mode 100644 TrigScint/include/TrigScint/ap_int_special.h diff --git a/TrigScint/include/TrigScint/ap_common.h b/TrigScint/include/TrigScint/ap_common.h deleted file mode 100644 index 994851902..000000000 --- a/TrigScint/include/TrigScint/ap_common.h +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __AP_COMMON_H__ -#define __AP_COMMON_H__ - -// ---------------------------------------------------------------------- - -// Forward declaration of all AP types. -#include - - -#ifdef __SYNTHESIS__ -#error "The open-source version of AP types does not support synthesis." -#endif // ifdef __SYNTHESIS__ -#define _AP_ENABLE_HALF_ 0 - - -#if _AP_ENABLE_HALF_ == 1 -// Before ap_private definition. -#ifdef __SYNTHESIS__ -#define _HLS_HALF_DEFINED_ -typedef __fp16 half; -#else -class half; -#endif // __SYNTHESIS__ -#endif // _AP_ENABLE_HALF_ - -// ---------------------------------------------------------------------- - -// Macro functions -#define AP_MAX(a, b) ((a) > (b) ? (a) : (b)) -#define AP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define AP_ABS(a) ((a) >= 0 ? (a) : -(a)) - -#ifndef AP_ASSERT -#ifndef __SYNTHESIS__ -#include -#define AP_ASSERT(cond, msg) assert((cond) && (msg)) -#else -#define AP_ASSERT(cond, msg) -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_ASSERT - -#ifndef __SYNTHESIS__ -// for fprintf messages. -#include -// for exit on error. -#include -#endif - -// same disable condition as assert. -#if !defined(__SYNTHESIS__) && !defined(NDEBUG) - -#define _AP_DEBUG(cond, ...) \ - do { \ - if ((cond)) { \ - fprintf(stderr, "DEBUG: " __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } \ - } while (0) -#define _AP_WARNING(cond, ...) \ - do { \ - if ((cond)) { \ - fprintf(stderr, "WARNING: " __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } \ - } while (0) -#define _AP_ERROR(cond, ...) \ - do { \ - if ((cond)) { \ - fprintf(stderr, "ERROR: " __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - abort(); \ - } \ - } while (0) - -#else // if !defined(__SYNTHESIS__) && !defined(NDEBUG) - -#define __AP_VOID_CAST static_cast -#define _AP_DEBUG(cond, ...) (__AP_VOID_CAST(0)) -#define _AP_WARNING(cond, ...) (__AP_VOID_CAST(0)) -#define _AP_ERROR(cond, ...) 
(__AP_VOID_CAST(0)) - -#endif // if !defined(__SYNTHESIS__) && !defined(NDEBUG) else - -// ---------------------------------------------------------------------- - -// Attribute only for synthesis -#ifdef __SYNTHESIS__ -#define INLINE inline __attribute__((always_inline)) -//#define INLINE inline __attribute__((noinline)) -#else -#define INLINE inline -#endif - -#define AP_WEAK -// __attribute__((weak)) - -#ifndef AP_INT_MAX_W -#define AP_INT_MAX_W 1024 -#endif - -#define BIT_WIDTH_UPPER_LIMIT (1 << 15) -#if AP_INT_MAX_W > BIT_WIDTH_UPPER_LIMIT -#error "Bitwidth exceeds 32768 (1 << 15), the maximum allowed value" -#endif - -#define MAX_MODE(BITS) ((BITS + 1023) / 1024) - -// ---------------------------------------------------------------------- - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -// for overload operator<< -#include -#endif -#endif // ifndef AP_AUTOCC - -#ifndef __SYNTHESIS__ -// for string format. -#include -// for string. -#include -#endif - -// for detecting if char is signed. -enum { CHAR_IS_SIGNED = (char)-1 < 0 }; - -// TODO we have similar traits in x_hls_utils.h, should consider unify. 
-namespace _ap_type { -template -struct is_signed { - static const bool value = _Tp(-1) < _Tp(1); -}; - -template -struct is_integral { - static const bool value = false; -}; -#define DEF_IS_INTEGRAL(CTYPE) \ - template <> \ - struct is_integral { \ - static const bool value = true; \ - }; -DEF_IS_INTEGRAL(bool) -DEF_IS_INTEGRAL(char) -DEF_IS_INTEGRAL(signed char) -DEF_IS_INTEGRAL(unsigned char) -DEF_IS_INTEGRAL(short) -DEF_IS_INTEGRAL(unsigned short) -DEF_IS_INTEGRAL(int) -DEF_IS_INTEGRAL(unsigned int) -DEF_IS_INTEGRAL(long) -DEF_IS_INTEGRAL(unsigned long) -DEF_IS_INTEGRAL(ap_slong) -DEF_IS_INTEGRAL(ap_ulong) -#undef DEF_IS_INTEGRAL - -template -struct enable_if {}; -// partial specialization for true -template -struct enable_if { - typedef _Tp type; -}; - -template -struct remove_const { - typedef _Tp type; -}; - -template -struct remove_const<_Tp const> { - typedef _Tp type; -}; -} // namespace _ap_type - -// ---------------------------------------------------------------------- - -// Define ssdm_int and _ssdm_op. -// XXX deleted in open-source version - -#ifndef NON_C99STRING -#define _AP_C99 true -#else -#define _AP_C99 false -#endif - -static inline unsigned char guess_radix(const char* s) { - unsigned char rd = 10; ///< default radix - const char* p = s; - // skip neg sign if it exists - if (p[0] == '-' || p[0] == '+') ++p; - // guess based on following two bits. - if (p[0] == '0') { - if (p[1] == 'b' || p[1] == 'B') { - rd = 2; - } else if (p[1] == 'o' || p[1] == 'O') { - rd = 8; - } else if (p[1] == 'x' || p[1] == 'X') { - rd = 16; - } else if (p[1] == 'd' || p[1] == 'D') { - rd = 10; - } - } - return rd; -} - -// ---------------------------------------------------------------------- - -// Basic integral struct upon which ap_int and ap_fixed are defined. -#ifdef __SYNTHESIS__ -// Use ssdm_int, a compiler dependent, attribute constrained integeral type as -// basic data type. -#define _AP_ROOT_TYPE ssdm_int -// Basic ops. 
-#define _AP_ROOT_op_concat(Ret, X, Y) _ssdm_op_concat(Ret, X, Y) -#define _AP_ROOT_op_get_bit(Val, Bit) _ssdm_op_get_bit(Val, Bit) -#define _AP_ROOT_op_set_bit(Val, Bit, Repl) _ssdm_op_set_bit(Val, Bit, Repl) -#define _AP_ROOT_op_get_range(Val, Lo, Hi) _ssdm_op_get_range(Val, Lo, Hi) -#define _AP_ROOT_op_set_range(Val, Lo, Hi, Repl) \ - _ssdm_op_set_range(Val, Lo, Hi, Repl) -#define _AP_ROOT_op_reduce(Op, Val) _ssdm_op_reduce(Op, Val) -#else // ifdef __SYNTHESIS__ -// Use ap_private for compiler-independent basic data type -template -class ap_private; -/// model ssdm_int in standard C++ for simulation. -template -struct ssdm_int_sim { - /// integral type with template-specified width and signedness. - ap_private<_AP_W, _AP_S> V; - ssdm_int_sim() {} -}; -#define _AP_ROOT_TYPE ssdm_int_sim -// private's ref uses _AP_ROOT_TYPE. -#include -// XXX The C-sim model cannot use GCC-extension -// Basic ops. Ret and Val are ap_private. -template -inline _Tp1 _AP_ROOT_op_concat(const _Tp1& Ret, const _Tp2& X, const _Tp3& Y) { - _Tp1 r = (X).operator,(Y); - return r; -} -#define _AP_ROOT_op_get_bit(Val, Bit) (Val).get_bit((Bit)) -template -inline _Tp1& _AP_ROOT_op_set_bit(_Tp1& Val, const _Tp2& Bit, const _Tp3& Repl) { - (Val).set_bit((Bit), (Repl)); - return Val; -} -// notice the order of high and low index is different in ssdm call and -// ap_private.range()... 
-#define _AP_ROOT_op_get_range(Val, Lo, Hi) (Val).range((Hi), (Lo)) -template -inline _Tp1& _AP_ROOT_op_set_range(_Tp1& Val, const _Tp2& Lo, const _Tp3& Hi, - const _Tp4& Repl) { - (Val).range((Hi), (Lo)) = Repl; - return (Val); -} -#define _AP_ROOT_op_and_reduce(Val) (Val).and_reduce() -#define _AP_ROOT_op_nand_reduce(Val) (Val).nand_reduce() -#define _AP_ROOT_op_or_reduce(Val) (Val).or_reduce() -#define _AP_ROOT_op_xor_reduce(Val) (Val).xor_reduce() -// ## is the concatenation in preprocessor: -#define _AP_ROOT_op_reduce(Op, Val) _AP_ROOT_op_##Op##_reduce(Val) -#endif // ifdef __SYNTHESIS__ else - -// ---------------------------------------------------------------------- - -// Constants for half, single, double pricision floating points -#define HALF_MAN 10 -#define FLOAT_MAN 23 -#define DOUBLE_MAN 52 - -#define HALF_EXP 5 -#define FLOAT_EXP 8 -#define DOUBLE_EXP 11 - -#define BIAS(e) ((1L << (e - 1L)) - 1L) -#define HALF_BIAS BIAS(HALF_EXP) -#define FLOAT_BIAS BIAS(FLOAT_EXP) -#define DOUBLE_BIAS BIAS(DOUBLE_EXP) - -#define APFX_IEEE_DOUBLE_E_MAX DOUBLE_BIAS -#define APFX_IEEE_DOUBLE_E_MIN (-DOUBLE_BIAS + 1) - -INLINE ap_ulong doubleToRawBits(double pf) { - union { - ap_ulong __L; - double __D; - } LD; - LD.__D = pf; - return LD.__L; -} - -INLINE unsigned int floatToRawBits(float pf) { - union { - unsigned int __L; - float __D; - } LD; - LD.__D = pf; - return LD.__L; -} - -#if _AP_ENABLE_HALF_ == 1 -INLINE unsigned short halfToRawBits(half pf) { -#ifdef __SYNTHESIS__ - union { - unsigned short __L; - half __D; - } LD; - LD.__D = pf; - return LD.__L; -#else - return pf.get_bits(); -#endif -} -#endif - -// usigned long long is at least 64-bit -INLINE double rawBitsToDouble(ap_ulong pi) { - union { - ap_ulong __L; - double __D; - } LD; - LD.__L = pi; - return LD.__D; -} - -// long is at least 32-bit -INLINE float rawBitsToFloat(unsigned long pi) { - union { - unsigned int __L; - float __D; - } LD; - LD.__L = pi; - return LD.__D; -} - -#if _AP_ENABLE_HALF_ == 1 -// 
short is at least 16-bit -INLINE half rawBitsToHalf(unsigned short pi) { -#ifdef __SYNTHESIS__ - union { - unsigned short __L; - half __D; - } LD; - LD.__L = pi; - return LD.__D; -#else - // sim model of half has a non-trivial constructor - half __D; - __D.set_bits(pi); - return __D; -#endif -} -#endif - -#endif // ifndef __AP_COMMON_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_decl.h b/TrigScint/include/TrigScint/ap_decl.h deleted file mode 100644 index ddd00f1c7..000000000 --- a/TrigScint/include/TrigScint/ap_decl.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_DECL_H__ -#define __AP_DECL_H__ - -// ---------------------------------------------------------------------- - -#if !defined(__AP_FIXED_H__) && !defined(__AP_INT_H__) && !defined(__AUTOPILOT_CBE_H__) && !defined(__HLS_HALF_H__) -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -// Test __SYNTHESIS__ only for mode -#if !defined(__SYNTHESIS__) && (defined(AESL_SYN) || defined(__HLS_SYN__)) -//#pragma message "AESL_SYN and __HLS_SYN__ should be replaced by __SYNTHESIS__" -#define __SYNTHESIS__ -#endif - -/* for safety*/ -#if (defined(_AP_N) || defined(_AP_C)) -#error One or more of the following is defined: _AP_N, _AP_C. Definition conflicts with their usage as template parameters. 
-#endif - -/* for safety*/ -#if (defined(_AP_W) || defined(_AP_I) || defined(_AP_S) || defined(_AP_Q) || \ - defined(_AP_O) || defined(_AP_W2) || defined(_AP_I2) || \ - defined(_AP_S2) || defined(_AP_Q2) || defined(_AP_O2) || \ - defined(_AP_N) || defined(_AP_N2)) -#error \ - "One or more of the following is defined: _AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N, _AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2. Definition conflicts with their usage as template parameters." -#endif - -/*for safety*/ -#if (defined(_AP_W3) || defined(_AP_S3) || defined(_AP_W4) || defined(_AP_S4)) -#error \ - "One or more of the following is defined: _AP_W3, _AP_S3, _AP_W4,_AP_S4. Definition conflicts with their usage as template parameters." -#endif - -#if (defined(_AP_W1) || defined(_AP_S1) || defined(_AP_T) || \ - defined(_AP_T1) || defined(_AP_T2) || defined(_AP_T3) || defined(_AP_T4)) -#error \ - "One or more of the following is defined: _AP_W1, _AP_S1, _AP_T, _AP_T1, _AP_T2, _AP_T3, _AP_T4. Definition conflicts with their usage as template parameters." 
-#endif - -#ifndef __cplusplus -#error "AP data type can only be used in C++" -#endif - -// ---------------------------------------------------------------------- - -#ifndef __SC_COMPATIBLE__ -/// ap_fixed quantification mode -enum ap_q_mode { - AP_RND, //< rounding to plus infinity - AP_RND_ZERO, //< rounding to zero - AP_RND_MIN_INF, //< rounding to minus infinity - AP_RND_INF, //< rounding to infinity - AP_RND_CONV, //< convergent rounding - AP_TRN, //< truncation - AP_TRN_ZERO, //< truncation to zero -}; - -// FIXME for legacy code -#ifndef SYSTEMC_INCLUDED -#define SC_RND AP_RND -#define SC_RND_ZERO AP_RND_ZERO -#define SC_RND_MIN_INF AP_RND_MIN_INF -#define SC_RND_INF AP_RND_INF -#define SC_RND_CONV AP_RND_CONV -#define SC_TRN AP_TRN -#define SC_TRN_ZERO AP_TRN_ZERO -#endif // !defined(SYSTEMC_INCLUDED) - -/// ap_fixed saturation mode -enum ap_o_mode { - AP_SAT, //< saturation - AP_SAT_ZERO, //< saturation to zero - AP_SAT_SYM, //< symmetrical saturation - AP_WRAP, //< wrap-around (*) - AP_WRAP_SM, //< sign magnitude wrap-around (*) -}; - -// FIXME for legacy code -#ifndef SYSTEMC_INCLUDED -#define SC_SAT AP_SAT -#define SC_SAT_ZERO AP_SAT_ZERO -#define SC_SAT_SYM AP_SAT_SYM -#define SC_WRAP AP_WRAP -#define SC_WRAP_SM AP_WRAP_SM -#endif // !defined(SYSTEMC_INCLUDED) - -#else // defined(__SC_COMPATIBLE__) - -// There will not be sc_fxdefs.h, and the emu should be defined by ap_fixed. 
- -/// ap_fixed quantification mode -enum ap_q_mode { - SC_RND, //< rounding to plus infinity - SC_RND_ZERO, //< rounding to zero - SC_RND_MIN_INF, //< rounding to minus infinity - SC_RND_INF, //< rounding to infinity - SC_RND_CONV, //< convergent rounding - SC_TRN, //< truncation - SC_TRN_ZERO, //< truncation to zero -}; - -#define AP_RND SC_RND -#define AP_RND_ZERO SC_RND_ZERO -#define AP_RND_MIN_INF SC_RND_MIN_INF -#define AP_RND_INF SC_RND_INF -#define AP_RND_CONV SC_RND_CONV -#define AP_TRN SC_TRN -#define AP_TRN_ZERO SC_TRN_ZERO - -/// ap_fixed saturation mode -enum ap_o_mode { - SC_SAT, //< saturation - SC_SAT_ZERO, //< saturation to zero - SC_SAT_SYM, //< symmetrical saturation - SC_WRAP, //< wrap-around (*) - SC_WRAP_SM, //< sign magnitude wrap-around (*) -}; - -#define AP_SAT SC_SAT -#define AP_SAT_ZERO SC_SAT_ZERO -#define AP_SAT_SYM SC_SAT_SYM -#define AP_WRAP SC_WRAP -#define AP_WRAP_SM SC_WRAP_SM - -#endif // defined(__SC_COMPATIBLE__) - -template -struct ap_int_base; - -template -struct ap_int; - -template -struct ap_uint; - -template -struct ap_range_ref; - -template -struct ap_bit_ref; - -template -struct ap_concat_ref; - -template -struct ap_fixed_base; - -template -struct ap_fixed; - -template -struct ap_ufixed; - -template -struct af_range_ref; - -template -struct af_bit_ref; - -/// string base mode -enum BaseMode { AP_BIN = 2, AP_OCT = 8, AP_DEC = 10, AP_HEX = 16 }; - -#ifndef SYSTEMC_INCLUDED -#define SC_BIN 2 -#define SC_OCT 8 -#define SC_DEC 10 -#define SC_HEX 16 -#endif // !defined(SYSTEMC_INCLUDED) - -// Alias C data types -#ifdef _MSC_VER -typedef signed __int64 ap_slong; -typedef unsigned __int64 ap_ulong; -#else // !defined(_MSC_VER) -typedef signed long long ap_slong; -typedef unsigned long long ap_ulong; -#endif // !defined(_MSC_VER) - -enum { - _AP_SIZE_char = 8, - _AP_SIZE_short = sizeof(short) * 8, - _AP_SIZE_int = sizeof(int) * 8, - _AP_SIZE_long = sizeof(long) * 8, - _AP_SIZE_ap_slong = sizeof(ap_slong) * 8 -}; - -#endif // 
!defined(__AP_DECL_H__) - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_fixed.h b/TrigScint/include/TrigScint/ap_fixed.h deleted file mode 100644 index 6362f3d71..000000000 --- a/TrigScint/include/TrigScint/ap_fixed.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_H__ -#define __AP_FIXED_H__ - -#include -#include -#include - -//--------------------------------------------------------------- - -/// Signed Arbitrary Precision Fixed-Point Type. -// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h -template -struct ap_fixed : ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> { - typedef ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> Base; - // Constructor - /// default ctor - INLINE ap_fixed() : Base() {} - - /// default copy ctor - INLINE ap_fixed(const ap_fixed& op) { Base::V = op.V; } - - /// copy ctor from ap_fixed_base. 
- template - INLINE ap_fixed(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_fixed(const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - //// from ap_fixed - //template - //INLINE ap_fixed( - // const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //template - //INLINE ap_fixed( - // const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //// from ap_ufixed. - //template - //INLINE ap_fixed( - // const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - //template - //INLINE ap_fixed( - // const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - /// copy ctor from ap_int_base. - template - INLINE ap_fixed(const ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_fixed(const volatile ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - //// from ap_int. - //template - //INLINE ap_fixed(const ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //template - //INLINE ap_fixed(const volatile ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //// from ap_uint. - //template - //INLINE ap_fixed(const ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - //template - //INLINE ap_fixed(const volatile ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - // from ap_bit_ref. - template - INLINE ap_fixed(const ap_bit_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - // from ap_range_ref. - template - INLINE ap_fixed(const ap_range_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - // from ap_concat_ref. 
- template - INLINE ap_fixed(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) - : Base(op) {} - - // from af_bit_ref. - template - INLINE ap_fixed( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - // from af_range_ref. - template - INLINE ap_fixed( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -// from c types. -#define CTOR(TYPE) \ - INLINE ap_fixed(TYPE v) : Base(v) {} - - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - CTOR(half) -#endif - CTOR(float) - CTOR(double) -#undef CTOR - - INLINE ap_fixed(const char* s) : Base(s) {} - - INLINE ap_fixed(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - // The assignment operator is technically inherited; however, it is always - // hidden by an explicitly or implicitly defined assignment operator for the - // derived class. - /* XXX ctor will be used when right is not of proper type. */ - INLINE ap_fixed& operator=( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { - Base::V = op.V; - } - - INLINE ap_fixed& operator=( - const volatile ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=( - const volatile ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { - Base::V = op.V; - } -}; // struct ap_fixed. - -//------------------------------------------------------------------- - -// Unsigned Arbitrary Precision Fixed-Point Type. 
-// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h -template -struct ap_ufixed : ap_fixed_base<_AP_W, _AP_I, false, _AP_Q, _AP_O, _AP_N> { - typedef ap_fixed_base<_AP_W, _AP_I, false, _AP_Q, _AP_O, _AP_N> Base; - // Constructor - /// default ctor - INLINE ap_ufixed() : Base() {} - - /// default copy ctor - INLINE ap_ufixed(const ap_ufixed& op) { Base::V = op.V; } - - /// copy ctor from ap_fixed_base - template - INLINE ap_ufixed(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - /// copy ctor from ap_fixed_base - template - INLINE ap_ufixed(const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - //template - //INLINE ap_ufixed( - // const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //template - //INLINE ap_ufixed( - // const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //template - //INLINE ap_ufixed( - // const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - //template - //INLINE ap_ufixed( - // const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - /// copy ctor from ap_int_base. 
- template - INLINE ap_ufixed(const ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_ufixed(const volatile ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - //template - //INLINE ap_ufixed(const ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //template - //INLINE ap_ufixed(const volatile ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //template - //INLINE ap_ufixed(const ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - //template - //INLINE ap_ufixed(const volatile ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - template - INLINE ap_ufixed(const ap_bit_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_ufixed(const ap_range_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_ufixed(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) - : Base(op) {} - - template - INLINE ap_ufixed( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_ufixed( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -#define CTOR(TYPE) \ - INLINE ap_ufixed(TYPE v) : Base(v) {} - - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - CTOR(half) -#endif - CTOR(float) - CTOR(double) -#undef CTOR - - INLINE ap_ufixed(const char* s) : Base(s) {} - - INLINE ap_ufixed(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - INLINE ap_ufixed& operator=( - const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=( - const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { - Base::V = op.V; - } - - INLINE ap_ufixed& operator=( - const volatile ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - 
Base::V = op.V; - return *this; - } - - INLINE void operator=(const volatile ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, - _AP_N>& op) volatile { - Base::V = op.V; - } -}; // struct ap_ufixed - - -#if !defined(__SYNTHESIS__) && (defined(SYSTEMC_H) || defined(SYSTEMC_INCLUDED)) -// XXX sc_trace overload for ap_fixed is already included in -// "ap_sysc/ap_sc_extras.h", so do not define in synthesis. -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op, - const std::string& name) { - tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} - -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, - const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op, - const std::string& name) { - tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} -#endif // System C sim - -// Specialization of std containers, so that std::complex can have its -// image part automatically zero-initialized when only real part is provided. -#include - -#endif // ifndef __AP_FIXED_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_fixed_base.h b/TrigScint/include/TrigScint/ap_fixed_base.h deleted file mode 100644 index eb2bdbf5d..000000000 --- a/TrigScint/include/TrigScint/ap_fixed_base.h +++ /dev/null @@ -1,2354 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __AP_FIXED_BASE_H__ -#define __AP_FIXED_BASE_H__ - -#ifndef __AP_FIXED_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -// for ap_int_base and its reference types. -#include -#ifndef __SYNTHESIS__ -#if _AP_ENABLE_HALF_ == 1 -// for half type -#include -#endif -// for std io -#include -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" -#else // __cplusplus - -// for warning on unsupported rounding mode in conversion to float/double. -#if !defined(__SYNTHESIS__) && __cplusplus >= 201103L && \ - (defined(__gnu_linux__) || defined(_WIN32)) -#define AP_FIXED_ENABLE_CPP_FENV 1 -#include -#endif - -// ---------------------------------------------------------------------- - -/* Major TODO - long double support: constructor, assign and other operators. - binary operators with ap_fixed_base and const char*. - return ap_fixed/ap_ufixed when result signedness is known. -*/ - -// Helper function in conversion to floating point types. - -#ifdef __SYNTHESIS__ -#define _AP_ctype_op_get_bit(var, index) _AP_ROOT_op_get_bit(var, index) -#define _AP_ctype_op_set_bit(var, index, x) _AP_ROOT_op_set_bit(var, index, x) -#define _AP_ctype_op_get_range(var, low, high) \ - _AP_ROOT_op_get_range(var, low, high) -#define _AP_ctype_op_set_range(var, low, high, x) \ - _AP_ROOT_op_set_range(var, low, high, x) -#else // ifdef __SYNTHESIS__ -template -inline bool _AP_ctype_op_get_bit(_Tp1& var, const _Tp2& index) { - return !!(var & (1ull << (index))); -} -template -inline _Tp1 _AP_ctype_op_set_bit(_Tp1& var, const _Tp2& index, const _Tp3& x) { - var |= (((x) ? 
1ull : 0ull) << (index)); - return var; -} -template -inline _Tp1 _AP_ctype_op_get_range(_Tp1& var, const _Tp2& low, - const _Tp3& high) { - _Tp1 r = var; - ap_ulong mask = -1ll; - mask >>= (sizeof(_Tp1) * 8 - ((high) - (low) + 1)); - r >>= (low); - r &= mask; - return r; -} -template -inline _Tp1 _AP_ctype_op_set_range(_Tp1& var, const _Tp2& low, const _Tp3& high, - const _Tp4& x) { - ap_ulong mask = -1ll; - mask >>= (_AP_SIZE_ap_slong - ((high) - (low) + 1)); - var &= ~(mask << (low)); - var |= ((mask & x) << (low)); - return var; -} -#endif // ifdef __SYNTHESIS__ - - -// trait for letting base class to return derived class. -// Notice that derived class template is incomplete, and we cannot use -// the member of the derived class. -template -struct _ap_fixed_factory; -template -struct _ap_fixed_factory<_AP_W2, _AP_I2, true> { - typedef ap_fixed<_AP_W2, _AP_I2> type; -}; -template -struct _ap_fixed_factory<_AP_W2, _AP_I2, false> { - typedef ap_ufixed<_AP_W2, _AP_I2> type; -}; - -/// ap_fixed_base: AutoPilot fixed point. -/** partial specialization of signed. - @tparam _AP_W width. - @tparam _AP_I integral part width. - @tparam _AP_S signed. - @tparam _AP_Q quantization mode. Default is AP_TRN. - @tparam _AP_O saturation mode. Default is AP_WRAP. - @tparam _AP_N saturation wrap value. Default is 0. - */ -// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h -template -struct ap_fixed_base : _AP_ROOT_TYPE<_AP_W, _AP_S> { - public: - typedef _AP_ROOT_TYPE<_AP_W, _AP_S> Base; - static const int width = _AP_W; - static const int iwidth = _AP_I; - static const ap_q_mode qmode = _AP_Q; - static const ap_o_mode omode = _AP_O; - - /// Return type trait. 
- template - struct RType { - enum { - _AP_F = _AP_W - _AP_I, - F2 = _AP_W2 - _AP_I2, - mult_w = _AP_W + _AP_W2, - mult_i = _AP_I + _AP_I2, - mult_s = _AP_S || _AP_S2, - plus_w = AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + - 1 + AP_MAX(_AP_F, F2), - plus_i = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1 + - AP_MAX(_AP_F, F2), - minus_i = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, -#ifndef __SC_COMPATIBLE__ - div_w = _AP_S2 + _AP_W + AP_MAX(F2, 0), -#else - div_w = _AP_S2 + _AP_W + AP_MAX(F2, 0) + AP_MAX(_AP_I2, 0), -#endif - div_i = _AP_S2 + _AP_I + F2, - div_s = _AP_S || _AP_S2, - logic_w = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + - AP_MAX(_AP_F, F2), - logic_i = AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S> lhs; - typedef ap_fixed_base<_AP_W2, _AP_I2, _AP_S2> rhs; - - typedef ap_fixed_base mult_base; - typedef ap_fixed_base plus_base; - typedef ap_fixed_base minus_base; - typedef ap_fixed_base logic_base; - typedef ap_fixed_base div_base; - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S> arg1_base; - - typedef typename _ap_fixed_factory::type mult; - typedef typename _ap_fixed_factory::type plus; - typedef typename _ap_fixed_factory::type minus; - typedef typename _ap_fixed_factory::type logic; - typedef typename _ap_fixed_factory::type div; - typedef typename _ap_fixed_factory<_AP_W, _AP_I, _AP_S>::type arg1; - }; - - private: -#ifndef __SYNTHESIS__ - // This cannot handle hex float format string. 
- void fromString(const std::string& val, unsigned char radix) { - _AP_ERROR(!(radix == 2 || radix == 8 || radix == 10 || radix == 16), - "ap_fixed_base::fromString(%s, %d)", val.c_str(), radix); - - Base::V = 0; - int startPos = 0; - int endPos = val.length(); - int decPos = val.find("."); - if (decPos == -1) decPos = endPos; - - // handle sign - bool isNegative = false; - if (val[0] == '-') { - isNegative = true; - ++startPos; - } else if (val[0] == '+') - ++startPos; - - // If there are no integer bits, e.g.: - // .0000XXXX, then keep at least one bit. - // If the width is greater than the number of integer bits, e.g.: - // XXXX.XXXX, then we keep the integer bits - // if the number of integer bits is greater than the width, e.g.: - // XXX000 then we keep the integer bits. - // Always keep one bit. - ap_fixed_base - integer_bits = 0; - - // Figure out if we can shift instead of multiply - unsigned shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0); - - //std::cout << "\n\n" << val << "\n"; - //std::cout << startPos << " " << decPos << " " << endPos << "\n"; - - bool sticky_int = false; - - // Traverse the integer digits from the MSD, multiplying by radix as we go. 
- for (int i = startPos; i < decPos; i++) { - // Get a digit - char cdigit = val[i]; - if (cdigit == '\0') continue; - unsigned digit = ap_private_ops::decode_digit(cdigit, radix); - - sticky_int |= integer_bits[AP_MAX(_AP_I, 4) + 4 - 1] | - integer_bits[AP_MAX(_AP_I, 4) + 4 - 2] | - integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] | - integer_bits[AP_MAX(_AP_I, 4) + 4 - 4]; - // Shift or multiply the value by the radix - if (shift) - integer_bits <<= shift; - else - integer_bits *= radix; - - // Add in the digit we just interpreted - integer_bits += digit; - //std::cout << "idigit = " << digit << " " << integer_bits.to_string() - // << " " << sticky_int << "\n"; - } - integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] = - integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] | sticky_int; - - ap_fixed_base fractional_bits = 0; - bool sticky = false; - - // Traverse the fractional digits from the LSD, dividing by radix as we go. - for (int i = endPos - 1; i >= decPos + 1; i--) { - // Get a digit - char cdigit = val[i]; - if (cdigit == '\0') continue; - unsigned digit = ap_private_ops::decode_digit(cdigit, radix); - // Add in the digit we just interpreted - fractional_bits += digit; - - sticky |= fractional_bits[0] | fractional_bits[1] | fractional_bits[2] | - fractional_bits[3]; - // Shift or divide the value by the radix - if (shift) - fractional_bits >>= shift; - else - fractional_bits /= radix; - - //std::cout << "fdigit = " << digit << " " << fractional_bits.to_string() - // << " " << sticky << "\n"; - } - - //std::cout << "Int =" << integer_bits.to_string() << " " << - // fractional_bits.to_string() << "\n"; - - fractional_bits[0] = fractional_bits[0] | sticky; - - if (isNegative) - *this = -(integer_bits + fractional_bits); - else - *this = integer_bits + fractional_bits; - - //std::cout << "end = " << this->to_string(16) << "\n"; - } - - /// report invalid constrction of ap_fixed_base - INLINE void report() { - if (!_AP_S && _AP_O == AP_WRAP_SM) { - fprintf(stderr, "ap_ufxied<...> cannot 
support AP_WRAP_SM.\n"); - exit(1); - } - if (_AP_W > MAX_MODE(AP_INT_MAX_W) * 1024) { - fprintf(stderr, - "[E] ap_%sfixed<%d, ...>: Bitwidth exceeds the " - "default max value %d. Please use macro " - "AP_INT_MAX_W to set a larger max value.\n", - _AP_S ? "" : "u", _AP_W, MAX_MODE(AP_INT_MAX_W) * 1024); - exit(1); - } - } -#else - INLINE void report() {} -#endif // ifdef __SYNTHESIS__ - - /// @name helper functions. - // @{ - INLINE void overflow_adjust(bool underflow, bool overflow, bool lD, - bool sign) { - if (!underflow && !overflow) return; - if (_AP_O == AP_WRAP) { - if (_AP_N == 0) return; - if (_AP_S) { - // signed AP_WRAP - // n_bits == 1 - Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, sign); - if (_AP_N > 1) { - // n_bits > 1 - ap_int_base<_AP_W, false> mask(-1); - if (sign) mask.V = 0; - Base::V = - _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 2, mask.V); - } - } else { - // unsigned AP_WRAP - ap_int_base<_AP_W, false> mask(-1); - Base::V = - _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 1, mask.V); - } - } else if (_AP_O == AP_SAT_ZERO) { - Base::V = 0; - } else if (_AP_O == AP_WRAP_SM && _AP_S) { - bool Ro = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - if (_AP_N == 0) { - if (lD != Ro) { - Base::V = ~Base::V; - Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, lD); - } - } else { - if (_AP_N == 1 && sign != Ro) { - Base::V = ~Base::V; - } else if (_AP_N > 1) { - bool lNo = _AP_ROOT_op_get_bit(Base::V, _AP_W - _AP_N); - if (lNo == sign) Base::V = ~Base::V; - ap_int_base<_AP_W, false> mask(-1); - if (sign) mask.V = 0; - Base::V = - _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 2, mask.V); - } - Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, sign); - } - } else { - if (_AP_S) { - if (overflow) { - Base::V = 1; - Base::V <<= _AP_W - 1; - Base::V = ~Base::V; - } else if (underflow) { - Base::V = 1; - Base::V <<= _AP_W - 1; - if (_AP_O == AP_SAT_SYM) Base::V |= 1; - } - } else { - if (overflow) - Base::V = ~(ap_int_base<_AP_W, 
false>(0).V); - else if (underflow) - Base::V = 0; - } - } - } - - INLINE bool quantization_adjust(bool qb, bool r, bool s) { - bool carry = (bool)_AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - if (_AP_Q == AP_TRN) return false; - if (_AP_Q == AP_RND_ZERO) - qb &= s || r; - else if (_AP_Q == AP_RND_MIN_INF) - qb &= r; - else if (_AP_Q == AP_RND_INF) - qb &= !s || r; - else if (_AP_Q == AP_RND_CONV) - qb &= _AP_ROOT_op_get_bit(Base::V, 0) || r; - else if (_AP_Q == AP_TRN_ZERO) - qb = s && (qb || r); - Base::V += qb; - return carry && (!(bool)_AP_ROOT_op_get_bit(Base::V, _AP_W - 1)); - } - // @} - - public: - /// @name constructors. - // @{ - /// default ctor. - INLINE ap_fixed_base() {} - - /// copy ctor. - template - INLINE ap_fixed_base( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - operator=(op); - report(); - } - - template - INLINE ap_fixed_base( - const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - operator=(op); - report(); - } - - template - INLINE ap_fixed_base(const ap_int_base<_AP_W2, _AP_S2>& op) { - ap_fixed_base<_AP_W2, _AP_W2, _AP_S2> tmp; - tmp.V = op.V; - operator=(tmp); - report(); - } - - template - INLINE ap_fixed_base(const volatile ap_int_base<_AP_W2, _AP_S2>& op) { - ap_fixed_base<_AP_W2, _AP_W2, _AP_S2> tmp; - tmp.V = op.V; - operator=(tmp); - report(); - } - -#ifndef __SYNTHESIS__ -#ifndef NON_C99STRING - INLINE ap_fixed_base(const char* s, signed char rd = 0) { - unsigned char radix = rd; - std::string str = ap_private_ops::parseString(s, radix); // will guess rd, default 10 - _AP_ERROR(radix == 0, "ap_fixed_base(const char* \"%s\", %d), str=%s, radix = %d", - s, rd, str.c_str(), radix); // TODO remove this check - fromString(str, radix); - } -#else - INLINE ap_fixed_base(const char* s, signed char rd = 10) { - ap_int_base<_AP_W, _AP_S> t(s, rd); - Base::V = t.V; - } -#endif // ifndef NON_C99STRING -#else // ifndef __SYNTHESIS__ - // XXX _ssdm_string2bits only takes const string 
and const radix. - // It seems XFORM will do compile time processing of the string. - INLINE ap_fixed_base(const char* s) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), 10, _AP_I, _AP_S, _AP_Q, - _AP_O, _AP_N, _AP_C99); - Base::V = t; - } - INLINE ap_fixed_base(const char* s, signed char rd) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), rd, _AP_I, _AP_S, _AP_Q, - _AP_O, _AP_N, _AP_C99); - Base::V = t; - } -#endif // ifndef __SYNTHESIS__ else - - template - INLINE ap_fixed_base(const ap_bit_ref<_AP_W2, _AP_S2>& op) { - *this = ((bool)op); - report(); - } - - template - INLINE ap_fixed_base(const ap_range_ref<_AP_W2, _AP_S2>& op) { - *this = (ap_int_base<_AP_W2, false>(op)); - report(); - } - - template - INLINE ap_fixed_base( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) { - *this = (ap_int_base<_AP_W2 + _AP_W3, false>(op)); - report(); - } - - template - INLINE ap_fixed_base( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - *this = (bool(op)); - report(); - } - - template - INLINE ap_fixed_base( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - *this = (ap_int_base<_AP_W2, false>(op)); - report(); - } - - // ctors from c types. 
- // make a temp ap_fixed_base first, and use ap_fixed_base.operator= -#define CTOR_FROM_INT(C_TYPE, _AP_W2, _AP_S2) \ - INLINE ap_fixed_base(const C_TYPE x) { \ - ap_fixed_base<(_AP_W2), (_AP_W2), (_AP_S2)> tmp; \ - tmp.V = x; \ - *this = tmp; \ - } - - CTOR_FROM_INT(bool, 1, false) - CTOR_FROM_INT(char, 8, CHAR_IS_SIGNED) - CTOR_FROM_INT(signed char, 8, true) - CTOR_FROM_INT(unsigned char, 8, false) - CTOR_FROM_INT(short, _AP_SIZE_short, true) - CTOR_FROM_INT(unsigned short, _AP_SIZE_short, false) - CTOR_FROM_INT(int, _AP_SIZE_int, true) - CTOR_FROM_INT(unsigned int, _AP_SIZE_int, false) - CTOR_FROM_INT(long, _AP_SIZE_long, true) - CTOR_FROM_INT(unsigned long, _AP_SIZE_long, false) - CTOR_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) - CTOR_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) -#undef CTOR_FROM_INT -/* - * TODO: - *Theere used to be several funtions which were AP_WEAK. - *Now they're all INLINE expect ap_fixed_base(double d) - *Maybe we can use '#pragma HLS inline' instead of INLINE. - */ - AP_WEAK ap_fixed_base(double d) { - ap_int_base<64, false> ireg; - ireg.V = doubleToRawBits(d); - bool isneg = _AP_ROOT_op_get_bit(ireg.V, 63); - - ap_int_base exp; - ap_int_base exp_tmp; - exp_tmp.V = - _AP_ROOT_op_get_range(ireg.V, DOUBLE_MAN, DOUBLE_MAN + DOUBLE_EXP - 1); - exp = exp_tmp - DOUBLE_BIAS; - ap_int_base man; - man.V = _AP_ROOT_op_get_range(ireg.V, 0, DOUBLE_MAN - 1); - // do not support NaN - _AP_WARNING(exp == APFX_IEEE_DOUBLE_E_MAX + 1 && man.V != 0, - "assign NaN to fixed point value"); - man.V = _AP_ROOT_op_set_bit(man.V, DOUBLE_MAN, 1); - if (isneg) man = -man; - if ((ireg.V & 0x7fffffffffffffffLL) == 0) { - Base::V = 0; - } else { - int _AP_W2 = DOUBLE_MAN + 2, _AP_I2 = exp.V + 2, _AP_F = _AP_W - _AP_I, - F2 = _AP_W2 - _AP_I2; - bool _AP_S2 = true, - QUAN_INC = F2 > _AP_F && - !(_AP_Q == AP_TRN || (_AP_Q == AP_TRN_ZERO && !_AP_S2)); - bool carry = false; - // handle quantization - unsigned sh_amt = (F2 > _AP_F) ? 
F2 - _AP_F : _AP_F - F2; - if (F2 == _AP_F) - Base::V = man.V; - else if (F2 > _AP_F) { - if (sh_amt < DOUBLE_MAN + 2) - Base::V = man.V >> sh_amt; - else { - Base::V = isneg ? -1 : 0; - } - if ((_AP_Q != AP_TRN) && !((_AP_Q == AP_TRN_ZERO) && !_AP_S2)) { - bool qb = (F2 - _AP_F > _AP_W2) ? isneg : (bool)_AP_ROOT_op_get_bit( - man.V, F2 - _AP_F - 1); - bool r = - (F2 > _AP_F + 1) - ? _AP_ROOT_op_get_range(man.V, 0, (F2 - _AP_F - 2 < _AP_W2) - ? (F2 - _AP_F - 2) - : (_AP_W2 - 1)) != 0 - : false; - carry = quantization_adjust(qb, r, isneg); - } - } else { // no quantization - Base::V = man.V; - if (sh_amt < _AP_W) - Base::V = Base::V << sh_amt; - else - Base::V = 0; - } - // handle overflow/underflow - if ((_AP_O != AP_WRAP || _AP_N != 0) && - ((!_AP_S && _AP_S2) || - _AP_I - _AP_S < - _AP_I2 - _AP_S2 + - (QUAN_INC || - (_AP_S2 && (_AP_O == AP_SAT_SYM))))) { // saturation - bool deleted_zeros = _AP_S2 ? true : !carry, deleted_ones = true; - bool neg_src = isneg; - bool lD = false; - int pos1 = F2 - _AP_F + _AP_W; - int pos2 = F2 - _AP_F + _AP_W + 1; - bool newsignbit = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - if (pos1 < _AP_W2 && pos1 >= 0) - // lD = _AP_ROOT_op_get_bit(man.V, pos1); - lD = (man.V >> pos1) & 1; - if (pos1 < _AP_W2) { - bool Range1_all_ones = true; - bool Range1_all_zeros = true; - bool Range2_all_ones = true; - ap_int_base Range2; - ap_int_base all_ones(-1); - - if (pos2 >= 0 && pos2 < _AP_W2) { - // Range2.V = _AP_ROOT_op_get_range(man.V, - // pos2, _AP_W2 - 1); - Range2.V = man.V; - Range2.V >>= pos2; - Range2_all_ones = Range2 == (all_ones >> pos2); - } else if (pos2 < 0) - Range2_all_ones = false; - if (pos1 >= 0 && pos2 < _AP_W2) { - Range1_all_ones = Range2_all_ones && lD; - Range1_all_zeros = !Range2.V && !lD; - } else if (pos2 == _AP_W2) { - Range1_all_ones = lD; - Range1_all_zeros = !lD; - } else if (pos1 < 0) { - Range1_all_zeros = !man.V; - Range1_all_ones = false; - } - - deleted_zeros = - deleted_zeros && (carry ? 
Range1_all_ones : Range1_all_zeros); - deleted_ones = - carry ? Range2_all_ones && (pos1 < 0 || !lD) : Range1_all_ones; - neg_src = isneg && !(carry && Range1_all_ones); - } else - neg_src = isneg && newsignbit; - bool neg_trg = _AP_S && newsignbit; - bool overflow = (neg_trg || !deleted_zeros) && !isneg; - bool underflow = (!neg_trg || !deleted_ones) && neg_src; - if ((_AP_O == AP_SAT_SYM) && _AP_S2 && _AP_S) - underflow |= - neg_src && - (_AP_W > 1 ? _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 2) == 0 - : true); - overflow_adjust(underflow, overflow, lD, neg_src); - } - } - report(); - } - - // TODO more optimized implementation. - INLINE ap_fixed_base(float d) { *this = ap_fixed_base(double(d)); } - -#if _AP_ENABLE_HALF_ == 1 - // TODO more optimized implementation. - INLINE ap_fixed_base(half d) { *this = ap_fixed_base(double(d)); } -#endif - // @} - - /// @name assign operator - /// assign, using another ap_fixed_base of same template parameters. - /* - INLINE ap_fixed_base& operator=( - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - */ - - template - INLINE ap_fixed_base& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - - const int _AP_F = _AP_W - _AP_I; - const int F2 = _AP_W2 - _AP_I2; - const int QUAN_INC = - F2 > _AP_F && !(_AP_Q == AP_TRN || (_AP_Q == AP_TRN_ZERO && !_AP_S2)); - - if (!op) Base::V = 0; - bool carry = false; - bool signbit = _AP_ROOT_op_get_bit(op.V, _AP_W2 - 1); - bool isneg = signbit && _AP_S2; - if (F2 == _AP_F) - Base::V = op.V; - else if (F2 > _AP_F) { - unsigned int sh_amt = F2 - _AP_F; - // moves bits right, handle quantization. - if (sh_amt < _AP_W2) { - Base::V = op.V >> sh_amt; - } else { - Base::V = isneg ? -1 : 0; - } - if (_AP_Q != AP_TRN && !(_AP_Q == AP_TRN_ZERO && !_AP_S2)) { - bool qbit = _AP_ROOT_op_get_bit(op.V, F2 - _AP_F - 1); - // bit after LSB. - bool qb = (F2 - _AP_F > _AP_W2) ? 
_AP_S2 && signbit : qbit; - enum { hi = ((F2 - _AP_F - 2) < _AP_W2) ? (F2 - _AP_F - 2) : (_AP_W2 - 1) }; - // bits after qb. - bool r = (F2 > _AP_F + 1) ? (_AP_ROOT_op_get_range(op.V, 0, hi) != 0) : false; - carry = quantization_adjust(qb, r, isneg); - } - } else { - unsigned sh_amt = _AP_F - F2; - // moves bits left, no quantization - if (sh_amt < _AP_W) { - if (_AP_W > _AP_W2) { - // extend and then shift, avoid losing bits. - Base::V = op.V; - Base::V <<= sh_amt; - } else { - // shift and truncate. - Base::V = op.V << sh_amt; - } - } else { - Base::V = 0; - } - } - // handle overflow/underflow - if ((_AP_O != AP_WRAP || _AP_N != 0) && - ((!_AP_S && _AP_S2) || - _AP_I - _AP_S < - _AP_I2 - _AP_S2 + - (QUAN_INC || (_AP_S2 && _AP_O == AP_SAT_SYM)))) { // saturation - bool deleted_zeros = _AP_S2 ? true : !carry; - bool deleted_ones = true; - bool neg_src = isneg; - bool newsignbit = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - enum { pos1 = F2 - _AP_F + _AP_W, pos2 = F2 - _AP_F + _AP_W + 1 }; - bool lD = (pos1 < _AP_W2 && pos1 >= 0) ? _AP_ROOT_op_get_bit(op.V, pos1) - : false; - if (pos1 < _AP_W2) { - bool Range1_all_ones = true; - bool Range1_all_zeros = true; - bool Range2_all_ones = true; - ap_int_base<_AP_W2, false> all_ones(-1); - - if (pos2 < _AP_W2 && pos2 >= 0) { - ap_int_base<_AP_W2, false> Range2; - Range2.V = _AP_ROOT_op_get_range(op.V, pos2, _AP_W2 - 1); - Range2_all_ones = Range2 == (all_ones >> pos2); - } else if (pos2 < 0) { - Range2_all_ones = false; - } - - if (pos1 >= 0 && pos2 < _AP_W2) { - ap_int_base<_AP_W2, false> Range1; - Range1.V = _AP_ROOT_op_get_range(op.V, pos1, _AP_W2 - 1); - Range1_all_ones = Range1 == (all_ones >> pos1); - Range1_all_zeros = !Range1.V; - } else if (pos2 == _AP_W2) { - Range1_all_ones = lD; - Range1_all_zeros = !lD; - } else if (pos1 < 0) { - Range1_all_zeros = !op.V; - Range1_all_ones = false; - } - - deleted_zeros = - deleted_zeros && (carry ? Range1_all_ones : Range1_all_zeros); - deleted_ones = - carry ? 
Range2_all_ones && (pos1 < 0 || !lD) : Range1_all_ones; - neg_src = isneg && !(carry && Range1_all_ones); - } else - neg_src = isneg && newsignbit; - bool neg_trg = _AP_S && newsignbit; - bool overflow = (neg_trg || !deleted_zeros) && !isneg; - bool underflow = (!neg_trg || !deleted_ones) && neg_src; - if ((_AP_O == AP_SAT_SYM) && _AP_S2 && _AP_S) - underflow |= - neg_src && - (_AP_W > 1 ? _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 2) == 0 - : true); - - overflow_adjust(underflow, overflow, lD, neg_src); - } - return *this; - } // operator= - - template - INLINE ap_fixed_base& operator=( - const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - operator=(const_cast&>(op)); - return *this; - } - - /// Set this ap_fixed_base with ULL. - INLINE ap_fixed_base& setBits(ap_ulong bv) { - // TODO when ull is not be long enough... - Base::V = bv; - return *this; - } - - /// Return a ap_fixed_base object whose this->V is assigned by bv. - static INLINE ap_fixed_base bitsToFixed(ap_ulong bv) { - // TODO fix when ull is not be long enough... - ap_fixed_base t; -#ifdef __SYNTHESIS__ - t.V = bv; -#else - t.V.set_bits(bv); -#endif - return t; - } - - // Explicit conversion functions to ap_int_base. - /** Captures all integer bits, in truncate mode. - * @param[in] Cnative follow conversion from double to int. - */ - INLINE ap_int_base to_ap_int_base( - bool Cnative = true) const { - ap_int_base ret; - if (_AP_I == 0) { - ret.V = 0; - } else if (_AP_I > 0 && _AP_I <= _AP_W) { - ret.V = _AP_ROOT_op_get_range(Base::V, _AP_W - _AP_I, _AP_W - 1); - } else if (_AP_I > _AP_W) { - ret.V = _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 1); - ret.V <<= (_AP_I - _AP_W); - } - /* Consider the following case - * float f = -7.5f; - * ap_fixed<8,4> t = f; // -8 0 0 0 . 0.5 - * int i = t.to_int(); - * the result should be -7 instead of -8. - * Therefore, after truncation, the value should be increated by 1. 
- * For (-1, 0), carry to MSB will happen, but result 0 is still correct. - */ - if (Cnative && _AP_I < _AP_W) { - // Follow C native data type, conversion from double to int - if (_AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1) && (_AP_I < _AP_W) && - (_AP_ROOT_op_get_range( - Base::V, 0, _AP_I < 0 ? _AP_W - 1 : _AP_W - _AP_I - 1) != 0)) - ++ret; - } else { - // Follow OSCI library, conversion from sc_fixed to sc_int - } - return ret; - }; - - public: - template - INLINE operator ap_int_base<_AP_W2, _AP_S2>() const { - return ap_int_base<_AP_W2, _AP_S2>(to_ap_int_base()); - } - - // Explicit conversion function to C built-in integral type. - INLINE char to_char() const { return to_ap_int_base().to_char(); } - - INLINE int to_int() const { return to_ap_int_base().to_int(); } - - INLINE unsigned to_uint() const { return to_ap_int_base().to_uint(); } - - INLINE ap_slong to_int64() const { return to_ap_int_base().to_int64(); } - - INLINE ap_ulong to_uint64() const { return to_ap_int_base().to_uint64(); } - - /// covert function to double. - /** only round-half-to-even mode supported, does not obey FE env. */ - INLINE double to_double() const { -#if defined(AP_FIXED_ENABLE_CPP_FENV) - _AP_WARNING(std::fegetround() != FE_TONEAREST, - "Only FE_TONEAREST is supported"); -#endif - enum { BITS = DOUBLE_MAN + DOUBLE_EXP + 1 }; - if (!Base::V) return 0.0f; - bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. - ap_int_base<_AP_W, false> tmp; - if (s) - tmp.V = -Base::V; // may truncate one bit extra from neg in sim. - else - tmp.V = Base::V; - int l = tmp.countLeadingZeros(); ///< number of leading zeros. - int e = _AP_I - l - 1 + DOUBLE_BIAS; ///< exponent - int lsb_index = _AP_W - l - 1 - DOUBLE_MAN; - // more than 0.5? - bool a = (lsb_index >=2) ? - (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; - // round to even - a |= (lsb_index >=0) ? 
_AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; - // ull is at least 64-bit - ap_ulong m; - // may actually left shift, ensure buffer is wide enough. - if (_AP_W > BITS) { - m = (lsb_index >= 1) ? (ap_ulong)(tmp.V >> (lsb_index - 1)) - : (ap_ulong)(tmp.V << (1 - lsb_index)); - } else { - m = (ap_ulong)tmp.V; - m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) - : (m << (1 - lsb_index)); - } - m += a; - m >>= 1; - //std::cout << '\n' << std::hex << m << '\n'; // TODO delete this - // carry to MSB, increase exponent - if (_AP_ctype_op_get_bit(m, DOUBLE_MAN + 1)) { - e += 1; - } - // set sign and exponent - m = _AP_ctype_op_set_bit(m, BITS - 1, s); - //std::cout << m << '\n'; // TODO delete this - m = _AP_ctype_op_set_range(m, DOUBLE_MAN, DOUBLE_MAN + DOUBLE_EXP - 1, e); - //std::cout << std::hex << m << std::dec << std::endl; // TODO delete this - // cast to fp - return rawBitsToDouble(m); - } - - /// convert function to float. - /** only round-half-to-even mode supported, does not obey FE env. */ - INLINE float to_float() const { -#if defined(AP_FIXED_ENABLE_CPP_FENV) - _AP_WARNING(std::fegetround() != FE_TONEAREST, - "Only FE_TONEAREST is supported"); -#endif - enum { BITS = FLOAT_MAN + FLOAT_EXP + 1 }; - if (!Base::V) return 0.0f; - bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. - ap_int_base<_AP_W, false> tmp; - if (s) - tmp.V = -Base::V; // may truncate one bit extra from neg in sim. - else - tmp.V = Base::V; - int l = tmp.countLeadingZeros(); ///< number of leading zeros. - int e = _AP_I - l - 1 + FLOAT_BIAS; ///< exponent - int lsb_index = _AP_W - l - 1 - FLOAT_MAN; - // more than 0.5? - bool a = (lsb_index >=2) ? - (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; - // round to even - a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; - // ul is at least 32-bit - unsigned long m; - // may actually left shift, ensure buffer is wide enough. - if (_AP_W > BITS) { - m = (lsb_index >= 1) ? 
(unsigned long)(tmp.V >> (lsb_index - 1)) - : (unsigned long)(tmp.V << (1 - lsb_index)); - } else { - m = (unsigned long)tmp.V; - m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) - : (m << (1 - lsb_index)); - } - m += a; - m >>= 1; - // carry to MSB, increase exponent - if (_AP_ctype_op_get_bit(m, FLOAT_MAN + 1)) { - e += 1; - } - // set sign and exponent - m = _AP_ctype_op_set_bit(m, BITS - 1, s); - m = _AP_ctype_op_set_range(m, FLOAT_MAN, FLOAT_MAN + FLOAT_EXP - 1, e); - // cast to fp - return rawBitsToFloat(m); - } - -#if _AP_ENABLE_HALF_ == 1 - /// convert function to half. - /** only round-half-to-even mode supported, does not obey FE env. */ - INLINE half to_half() const { -#if defined(AP_FIXED_ENABLE_CPP_FENV) - _AP_WARNING(std::fegetround() != FE_TONEAREST, - "Only FE_TONEAREST is supported"); -#endif - enum { BITS = HALF_MAN + HALF_EXP + 1 }; - if (!Base::V) return 0.0f; - bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. - ap_int_base<_AP_W, false> tmp; - if (s) - tmp.V = -Base::V; // may truncate one bit extra from neg in sim. - else - tmp.V = Base::V; - int l = tmp.countLeadingZeros(); ///< number of leading zeros. - int e = _AP_I - l - 1 + HALF_BIAS; ///< exponent - int lsb_index = _AP_W - l - 1 - HALF_MAN; - // more than 0.5? - bool a = (lsb_index >=2) ? - (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; - // round to even - a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; - // short is at least 16-bit - unsigned short m; - // may actually left shift, ensure buffer is wide enough. - if (_AP_W > BITS) { - m = (lsb_index >= 1) ? (unsigned short)(tmp.V >> (lsb_index - 1)) - : (unsigned short)(tmp.V << (1 - lsb_index)); - } else { - m = (unsigned short)tmp.V; - m = (lsb_index >= 1) ? 
(m >> (lsb_index - 1)) - : (m << (1 - lsb_index)); - } - m += a; - m >>= 1; - // carry to MSB, increase exponent - if (_AP_ctype_op_get_bit(m, HALF_MAN + 1)) { - e += 1; - } - // set sign and exponent - m = _AP_ctype_op_set_bit(m, BITS - 1, s); - m = _AP_ctype_op_set_range(m, HALF_MAN, HALF_MAN + HALF_EXP - 1, e); - // cast to fp - return rawBitsToHalf(m); - } -#endif - - // FIXME inherited from old code, this may loose precision! - INLINE operator long double() const { return (long double)to_double(); } - - INLINE operator double() const { return to_double(); } - - INLINE operator float() const { return to_float(); } - -#if _AP_ENABLE_HALF_ == 1 - INLINE operator half() const { return to_half(); } -#endif - - INLINE operator bool() const { return (bool)Base::V != 0; } - - INLINE operator char() const { return (char)to_int(); } - - INLINE operator signed char() const { return (signed char)to_int(); } - - INLINE operator unsigned char() const { return (unsigned char)to_uint(); } - - INLINE operator short() const { return (short)to_int(); } - - INLINE operator unsigned short() const { return (unsigned short)to_uint(); } - - INLINE operator int() const { return to_int(); } - - INLINE operator unsigned int() const { return to_uint(); } - -// FIXME don't assume data width... -#ifdef __x86_64__ - INLINE operator long() const { return (long)to_int64(); } - - INLINE operator unsigned long() const { return (unsigned long)to_uint64(); } -#else - INLINE operator long() const { return (long)to_int(); } - - INLINE operator unsigned long() const { return (unsigned long)to_uint(); } -#endif // ifdef __x86_64__ else - - INLINE operator ap_ulong() const { return to_uint64(); } - - INLINE operator ap_slong() const { return to_int64(); } - - INLINE int length() const { return _AP_W; }; - - // bits_to_int64 deleted. -#ifndef __SYNTHESIS__ - // Used in autowrap, when _AP_W < 64. 
- INLINE ap_ulong bits_to_uint64() const { - return (Base::V).to_uint64(); - } -#endif - - // Count the number of zeros from the most significant bit - // to the first one bit. Note this is only for ap_fixed_base whose - // _AP_W <= 64, otherwise will incur assertion. - INLINE int countLeadingZeros() { -#ifdef __SYNTHESIS__ - // TODO: used llvm.ctlz intrinsic ? - if (_AP_W <= 32) { - ap_int_base<32, false> t(-1ULL); - t.range(_AP_W - 1, 0) = this->range(0, _AP_W - 1); - return __builtin_ctz(t.V); - } else if (_AP_W <= 64) { - ap_int_base<64, false> t(-1ULL); - t.range(_AP_W - 1, 0) = this->range(0, _AP_W - 1); - return __builtin_ctzll(t.V); - } else { - enum {__N = (_AP_W + 63) / 64}; - int NZeros = 0; - int i = 0; - bool hitNonZero = false; - for (i = 0; i < __N - 1; ++i) { - ap_int_base<64, false> t; - t.range(0, 63) = this->range(_AP_W - i * 64 - 64, _AP_W - i * 64 - 1); - NZeros += hitNonZero ? 0 : __builtin_clzll(t.V); - hitNonZero |= (t != 0); - } - if (!hitNonZero) { - ap_int_base<64, false> t(-1ULL); - t.range(63 - (_AP_W - 1) % 64, 63) = this->range(0, (_AP_W - 1) % 64); - NZeros += __builtin_clzll(t.V); - } - return NZeros; - } -#else - return Base::V.countLeadingZeros(); -#endif - } - - // Arithmetic : Binary - // ------------------------------------------------------------------------- - template - INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::mult operator*( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) - const { - typename RType<_AP_W2, _AP_I2, _AP_S2>::mult_base r, t; - r.V = Base::V; - t.V = op2.V; - r.V *= op2.V; - return r; - } - - // multiply function deleted. - - template - INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::div operator/( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) - const { - typename RType<_AP_W2, _AP_I2, _AP_S2>::div_base r; -#ifndef __SYNTHESIS__ - enum {F2 = _AP_W2-_AP_I2, - _W1=AP_MAX(_AP_W + AP_MAX(F2, 0) + ((_AP_S2 && !_AP_S) ? 
1 : 0), _AP_W2 + ((_AP_S && !_AP_S2) ? 1 : 0))}; - ap_int_base<_W1,_AP_S||_AP_S2> dividend,divisior; - ap_int_base<_W1,_AP_S> tmp1; - ap_int_base<_W1,_AP_S2> tmp2; - tmp1.V = Base::V; - tmp1.V <<= AP_MAX(F2,0); - tmp2.V = op2.V; - dividend = tmp1; - divisior = tmp2; - r.V = ((_AP_S||_AP_S2) ? dividend.V.sdiv(divisior.V): dividend.V.udiv(divisior.V)); -#else - #ifndef __SC_COMPATIBLE__ - ap_fixed_base<_AP_W + AP_MAX(_AP_W2 - _AP_I2, 0),_AP_I, _AP_S> t(*this); - #else - ap_fixed_base<_AP_W + AP_MAX(_AP_W2 - _AP_I2, 0) + AP_MAX(_AP_I2, 0),_AP_I, _AP_S> t(*this); - #endif - r.V = t.V / op2.V; -#endif -/* - enum { - F2 = _AP_W2 - _AP_I2, - shl = AP_MAX(F2, 0) + AP_MAX(_AP_I2, 0), -#ifndef __SC_COMPATIBLE__ - shr = AP_MAX(_AP_I2, 0), -#else - shr = 0, -#endif - W3 = _AP_S2 + _AP_W + shl, - S3 = _AP_S || _AP_S2, - }; - ap_int_base dividend, t; - dividend.V = Base::V; - // multiply both by (1 << F2), and than do integer division. - dividend.V <<= (int) shl; -#ifdef __SYNTHESIS__ - // .V's have right signedness, and will have right extending. - t.V = dividend.V / op2.V; -#else - // XXX op2 may be wider than dividend, and sdiv and udiv takes the same with - // as left hand operand, so data might be truncated by mistake if not - // handled here. - t.V = S3 ? 
dividend.V.sdiv(op2.V) : dividend.V.udiv(op2.V); -#endif - r.V = t.V >> (int) shr; -*/ - return r; - } - -#define OP_BIN_AF(Sym, Rty) \ - template \ - INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::Rty operator Sym( \ - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& \ - op2) const { \ - typename RType<_AP_W2, _AP_I2, _AP_S2>::Rty##_base ret, lhs(*this), \ - rhs(op2); \ - ret.V = lhs.V Sym rhs.V; \ - return ret; \ - } - - OP_BIN_AF(+, plus) - OP_BIN_AF(-, minus) - OP_BIN_AF(&, logic) - OP_BIN_AF(|, logic) - OP_BIN_AF(^, logic) - -// Arithmetic : assign -// ------------------------------------------------------------------------- -#define OP_ASSIGN_AF(Sym) \ - template \ - INLINE ap_fixed_base& operator Sym##=( \ - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& \ - op2) { \ - *this = operator Sym(op2); \ - return *this; \ - } - - OP_ASSIGN_AF(*) - OP_ASSIGN_AF(/) - OP_ASSIGN_AF(+) - OP_ASSIGN_AF(-) - OP_ASSIGN_AF(&) - OP_ASSIGN_AF(|) - OP_ASSIGN_AF(^) - - // Prefix and postfix increment and decrement. - // ------------------------------------------------------------------------- - - /// Prefix increment - INLINE ap_fixed_base& operator++() { - operator+=(ap_fixed_base<_AP_W - _AP_I + 1, 1, false>(1)); - return *this; - } - - /// Prefix decrement. - INLINE ap_fixed_base& operator--() { - operator-=(ap_fixed_base<_AP_W - _AP_I + 1, 1, false>(1)); - return *this; - } - - /// Postfix increment - INLINE const ap_fixed_base operator++(int) { - ap_fixed_base r(*this); - operator++(); - return r; - } - - /// Postfix decrement - INLINE const ap_fixed_base operator--(int) { - ap_fixed_base r(*this); - operator--(); - return r; - } - - // Unary arithmetic. 
- // ------------------------------------------------------------------------- - INLINE ap_fixed_base operator+() { return *this; } - - INLINE ap_fixed_base<_AP_W + 1, _AP_I + 1, true> operator-() const { - ap_fixed_base<_AP_W + 1, _AP_I + 1, true> r(*this); - r.V = -r.V; - return r; - } - - INLINE ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> getNeg() { - ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> r(*this); - r.V = -r.V; - return r; - } - - // Not (!) - // ------------------------------------------------------------------------- - INLINE bool operator!() const { return Base::V == 0; } - - // Bitwise complement - // ------------------------------------------------------------------------- - // XXX different from Mentor's ac_fixed. - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S> operator~() const { - ap_fixed_base<_AP_W, _AP_I, _AP_S> r; - r.V = ~Base::V; - return r; - } - - // Shift - // ------------------------------------------------------------------------- - // left shift is the same as moving point right, i.e. increate I. - template - INLINE ap_fixed_base<_AP_W, _AP_I + _AP_SHIFT, _AP_S> lshift() const { - ap_fixed_base<_AP_W, _AP_I + _AP_SHIFT, _AP_S> r; - r.V = Base::V; - return r; - } - - template - INLINE ap_fixed_base<_AP_W, _AP_I - _AP_SHIFT, _AP_S> rshift() const { - ap_fixed_base<_AP_W, _AP_I - _AP_SHIFT, _AP_S> r; - r.V = Base::V; - return r; - } - - // Because the return type is the type of the the first operand, shift assign - // operators do not carry out any quantization or overflow - // While systemc, shift assigns for sc_fixed/sc_ufixed will result in - // quantization or overflow (depending on the mode of the first operand) - INLINE ap_fixed_base operator<<(unsigned int sh) const { - ap_fixed_base r; - r.V = Base::V << sh; -// TODO check shift overflow? 
-#ifdef __SC_COMPATIBLE__ - if (sh == 0) return r; - if (_AP_O != AP_WRAP || _AP_N != 0) { - bool neg_src = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - bool allones, allzeros; - ap_int_base<_AP_W, false> ones(-1); - if (sh <= _AP_W) { - ap_int_base<_AP_W, false> range1; - range1.V = _AP_ROOT_op_get_range( - const_cast(this)->Base::V, _AP_W - sh, _AP_W - 1); - allones = range1 == (ones >> (_AP_W - sh)); - allzeros = range1 == 0; - } else { - allones = false; - allzeros = Base::V == 0; - } - bool overflow = !allzeros && !neg_src; - bool underflow = !allones && neg_src; - if ((_AP_O == AP_SAT_SYM) && _AP_S) - underflow |= - neg_src && - (_AP_W > 1 ? _AP_ROOT_op_get_range(r.V, 0, _AP_W - 2) == 0 : true); - bool lD = false; - if (sh < _AP_W) lD = _AP_ROOT_op_get_bit(Base::V, _AP_W - sh - 1); - r.overflow_adjust(underflow, overflow, lD, neg_src); - } -#endif - return r; - } - - INLINE ap_fixed_base operator>>(unsigned int sh) const { - ap_fixed_base r; - r.V = Base::V >> sh; -// TODO check shift overflow? -#ifdef __SC_COMPATIBLE__ - if (sh == 0) return r; - if (_AP_Q != AP_TRN) { - bool qb = false; - if (sh <= _AP_W) qb = _AP_ROOT_op_get_bit(Base::V, sh - 1); - bool rb = false; - if (sh > 1 && sh <= _AP_W) - rb = _AP_ROOT_op_get_range(const_cast(this)->Base::V, 0, - sh - 2) != 0; - else if (sh > _AP_W) - rb = Base::V != 0; - r.quantization_adjust(qb, rb, - _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)); - } -#endif - return r; - } - - // left and right shift for int - INLINE ap_fixed_base operator<<(int sh) const { - ap_fixed_base r; - bool isNeg = sh < 0; - unsigned int ush = isNeg ? -sh : sh; - if (isNeg) { - return operator>>(ush); - } else { - return operator<<(ush); - } - } - - INLINE ap_fixed_base operator>>(int sh) const { - bool isNeg = sh < 0; - unsigned int ush = isNeg ? -sh : sh; - if (isNeg) { - return operator<<(ush); - } else { - return operator>>(ush); - } - } - - // left and right shift for ap_int. 
- template - INLINE ap_fixed_base operator<<(const ap_int_base<_AP_W2, true>& op2) const { - // TODO the code seems not optimal. ap_fixed<8,8> << ap_int<2> needs only a - // small mux, but integer need a big one! - int sh = op2.to_int(); - return operator<<(sh); - } - - template - INLINE ap_fixed_base operator>>(const ap_int_base<_AP_W2, true>& op2) const { - int sh = op2.to_int(); - return operator>>(sh); - } - - // left and right shift for ap_uint. - template - INLINE ap_fixed_base operator<<(const ap_int_base<_AP_W2, false>& op2) const { - unsigned int sh = op2.to_uint(); - return operator<<(sh); - } - - template - INLINE ap_fixed_base operator>>(const ap_int_base<_AP_W2, false>& op2) const { - unsigned int sh = op2.to_uint(); - return operator>>(sh); - } - - // left and right shift for ap_fixed - template - INLINE ap_fixed_base operator<<( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - op2) { - return operator<<(op2.to_ap_int_base()); - } - - template - INLINE ap_fixed_base operator>>( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - op2) { - return operator>>(op2.to_ap_int_base()); - } - - // Shift assign. - // ------------------------------------------------------------------------- - - // left shift assign. - INLINE ap_fixed_base& operator<<=(const int sh) { - *this = operator<<(sh); - return *this; - } - - INLINE ap_fixed_base& operator<<=(const unsigned int sh) { - *this = operator<<(sh); - return *this; - } - - template - INLINE ap_fixed_base& operator<<=(const ap_int_base<_AP_W2, _AP_S2>& sh) { - *this = operator<<(sh.to_int()); - return *this; - } - - template - INLINE ap_fixed_base& operator<<=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - sh) { - *this = operator<<(sh.to_int()); - return *this; - } - - // right shift assign. 
- INLINE ap_fixed_base& operator>>=(const int sh) { - *this = operator>>(sh); - return *this; - } - - INLINE ap_fixed_base& operator>>=(const unsigned int sh) { - *this = operator>>(sh); - return *this; - } - - template - INLINE ap_fixed_base& operator>>=(const ap_int_base<_AP_W2, _AP_S2>& sh) { - *this = operator>>(sh.to_int()); - return *this; - } - - template - INLINE ap_fixed_base& operator>>=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - sh) { - *this = operator>>(sh.to_int()); - return *this; - } - -// Comparisons. -// ------------------------------------------------------------------------- -#define OP_CMP_AF(Sym) \ - template \ - INLINE bool operator Sym(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, \ - _AP_O2, _AP_N2>& op2) const { \ - enum { _AP_F = _AP_W - _AP_I, F2 = _AP_W2 - _AP_I2 }; \ - if (_AP_F == F2) \ - return Base::V Sym op2.V; \ - else if (_AP_F > F2) \ - return Base::V Sym ap_fixed_base(op2).V; \ - else \ - return ap_fixed_base(*this).V Sym op2.V; \ - return false; \ - } - - OP_CMP_AF(>) - OP_CMP_AF(<) - OP_CMP_AF(>=) - OP_CMP_AF(<=) - OP_CMP_AF(==) - OP_CMP_AF(!=) -// FIXME: Move compare with double out of struct ap_fixed_base defination -// and combine it with compare operator(double, ap_fixed_base) -#define DOUBLE_CMP_AF(Sym) \ - INLINE bool operator Sym(double d) const { return to_double() Sym d; } - - DOUBLE_CMP_AF(>) - DOUBLE_CMP_AF(<) - DOUBLE_CMP_AF(>=) - DOUBLE_CMP_AF(<=) - DOUBLE_CMP_AF(==) - DOUBLE_CMP_AF(!=) - - // Bit and Slice Select - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator[]( - unsigned index) { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, index); - } - - template - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator[]( - const ap_int_base<_AP_W2, _AP_S2>& index) { - _AP_WARNING(index < 0, "Attempting to read bit with negative index"); - _AP_WARNING(index >= 
_AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, - index.to_int()); - } - - INLINE bool operator[](unsigned index) const { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, index); - } - - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> bit( - unsigned index) { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, index); - } - - template - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> bit( - const ap_int_base<_AP_W2, _AP_S2>& index) { - _AP_WARNING(index < 0, "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, - index.to_int()); - } - - INLINE bool bit(unsigned index) const { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, index); - } - - template - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> get_bit( - const ap_int_base<_AP_W2, true>& index) { - _AP_WARNING(index < _AP_I - _AP_W, - "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( - this, index.to_int() + _AP_W - _AP_I); - } - - INLINE bool get_bit(int index) const { - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - _AP_WARNING(index < _AP_I - _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, - index + _AP_W - _AP_I); - } -#if 0 - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> get_bit( - int index) { - _AP_WARNING(index < _AP_I - _AP_W, - "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - return 
af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( - this, index + _AP_W - _AP_I); - } -#endif - - template - INLINE bool get_bit(const ap_int_base<_AP_W2, true>& index) const { - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - _AP_WARNING(index < _AP_I - _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, - index.to_int() + _AP_W - _AP_I); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range(int Hi, - int Lo) { - _AP_WARNING((Hi >= _AP_W) || (Lo >= _AP_W), "Out of bounds in range()"); - return af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, Hi, Lo); - } - - // This is a must to strip constness to produce reference type. - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( - int Hi, int Lo) const { - _AP_WARNING((Hi >= _AP_W) || (Lo >= _AP_W), "Out of bounds in range()"); - return af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( - const_cast(this), Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range() { - return this->range(_AP_W - 1, 0); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range() const { - return this->range(_AP_W - 1, 0); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - int Hi, int Lo) { - return this->range(Hi, Lo); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - int Hi, int Lo) 
const { - return this->range(Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE bool is_zero() const { return Base::V == 0; } - - INLINE bool is_neg() const { - if (_AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)) return true; - return false; - } - - INLINE int wl() const { return _AP_W; } - - INLINE int iwl() const { return _AP_I; } - - INLINE ap_q_mode q_mode() const { return _AP_Q; } - - INLINE ap_o_mode o_mode() const { return _AP_O; } - - INLINE int n_bits() const { return _AP_N; } - - // print a string representation of this number in the given radix. - // Radix support is 2, 8, 10, or 16. - // The result will include a prefix indicating the radix, except for decimal, - // where no prefix is needed. The default is to output a signed representation - // of signed numbers, or an unsigned representation of unsigned numbers. For - // non-decimal formats, this can be changed by the 'sign' argument. -#ifndef __SYNTHESIS__ - std::string to_string(unsigned char radix = 2, bool sign = _AP_S) const { - // XXX in autosim/autowrap.tcl "(${name}).to_string(2).c_str()" is used to - // initialize sc_lv, which seems incapable of handling format "-0b". - if (radix == 2) sign = false; - - std::string str; - str.clear(); - char step = 0; - bool isNeg = sign && (Base::V < 0); - - // Extend to take care of the -MAX case. 
- ap_fixed_base<_AP_W + 1, _AP_I + 1> tmp(*this); - if (isNeg) { - tmp = -tmp; - str += '-'; - } - std::string prefix; - switch (radix) { - case 2: - prefix = "0b"; - step = 1; - break; - case 8: - prefix = "0o"; - step = 3; - break; - case 16: - prefix = "0x"; - step = 4; - break; - default: - break; - } - - if (_AP_I > 0) { - // Note we drop the quantization and rounding flags here. The - // integer part is always in range, and the fractional part we - // want to drop. Also, the number is always positive, because - // of the absolute value above. - ap_int_base int_part; - // [1] [ I ] d [ W - I ] - // | | | - // | W-I 0 - // W - int_part.V = _AP_ROOT_op_get_range( - tmp.V, _AP_W - _AP_I, _AP_W); - str += int_part.to_string(radix, false); - } else { - str += prefix; - str += '0'; - } - - ap_fixed_base frac_part = tmp; - - if (radix == 10) { - if (frac_part != 0) { - str += "."; - while (frac_part != 0) { - char digit = (frac_part * radix).to_char(); - str += static_cast(digit + '0'); - frac_part *= radix; - } - } - } else { - if (frac_part != 0) { - str += "."; - for (signed i = _AP_W - _AP_I - 1; i >= 0; i -= step) { - char digit = frac_part.range(i, AP_MAX(0, i - step + 1)).to_char(); - // If we have a partial bit pattern at the end, then we need - // to put it in the high-order bits of 'digit'. - int offset = AP_MIN(0, i - step + 1); - digit <<= -offset; - str += digit < 10 ? static_cast(digit + '0') - : static_cast(digit - 10 + 'a'); - } - if (radix == 16) - str += "p0"; // C99 Hex constants are required to have an exponent. - } - } - return str; - } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string(unsigned char radix = 2, bool sign = _AP_S) const { - return 0; - } -#endif -}; // struct ap_fixed_base. 
- -template -INLINE void b_not( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { - ret.V = ~op.V; -} - -template -INLINE void b_and( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - ret.V = op1.V & op2.V; -} - -template -INLINE void b_or( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - ret.V = op1.V | op2.V; -} - -template -INLINE void b_xor( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - ret.V = op1.V ^ op2.V; -} - -template -INLINE void neg( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - ap_fixed_base<_AP_W2 + !_AP_S2, _AP_I2 + !_AP_S2, true, _AP_Q2, _AP_O2, - _AP_N2> - t; - t.V = -op.V; - ret = t; -} - -template -INLINE void lshift( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op, - int i) { - enum { - F2 = _AP_W2 - _AP_I2, - _AP_I3 = AP_MAX(_AP_I, _AP_I2), - _AP_W3 = _AP_I3 + F2, - }; - // wide buffer - ap_fixed_base<_AP_W3, _AP_I3, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> t; - t.V = op.V; - t.V <<= i; // FIXME overflow? 
- // handle quantization and overflow - ret = t; -} - -template -INLINE void rshift( - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret, - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op, - int i) { - enum { - F = _AP_W - _AP_I, - F2 = _AP_W2 - _AP_I2, - F3 = AP_MAX(F, F2), - _AP_W3 = _AP_I2 + F3, - sh = F - F2, - }; - // wide buffer - ap_fixed_base<_AP_W3, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> t; - t.V = op.V; - if (sh >= 0) - t.V <<= (int) sh; - t.V >>= i; - // handle quantization and overflow - ret = t; -} - -//// FIXME -//// These partial specialization ctors allow code like -//// char c = 'a'; -//// ap_fixed_base<8, 8, true> x(c); -//// but what bout ap_fixed_base<9, 9, true> y(c) ? -// - -#ifndef __SYNTHESIS__ -INLINE std::string scientificFormat(std::string& input) { - if (input.length() == 0) return input; - - size_t decPosition = input.find('.'); - if (decPosition == std::string::npos) decPosition = input.length(); - - size_t firstNonZeroPos = 0; - for (; input[firstNonZeroPos] > '9' || input[firstNonZeroPos] < '1'; - firstNonZeroPos++) - ; - - int exp; - if (firstNonZeroPos > decPosition) - exp = decPosition - firstNonZeroPos; - else - exp = decPosition - firstNonZeroPos - 1; - std::string expString = ""; - if (exp == 0) - ; - else if (exp < 0) { - expString += "e-"; - exp = -exp; - } else - expString += "e+"; - - if (exp < 10 && exp > 0) { - expString += '0'; - expString += (char)('0' + exp); - } else if (exp != 0) { - std::string tmp; - - std::ostringstream oss; - oss << exp; - - tmp = oss.str(); - expString += tmp; - } - - int lastNonZeroPos = (int)(input.length() - 1); - for (; lastNonZeroPos >= 0; --lastNonZeroPos) - if (input[lastNonZeroPos] <= '9' && input[lastNonZeroPos] > '0') break; - - std::string ans = ""; - ans += input[firstNonZeroPos]; - if (firstNonZeroPos != (size_t)lastNonZeroPos) { - ans += '.'; - for (int i = firstNonZeroPos + 1; i <= lastNonZeroPos; i++) - if (input[i] != '.') ans += input[i]; - } 
- - ans += expString; - return ans; -} - -INLINE std::string reduceToPrecision(std::string& input, int precision) { - bool isZero = true; - size_t inputLen = input.length(); - for (size_t i = 0; i < inputLen && isZero; i++) - if (input[i] != '.' && input[i] != '0') isZero = false; - if (isZero) return "0"; - - // Find the first valid number, skip '-' - int FirstNonZeroPos = 0; - int LastNonZeroPos = (int)inputLen - 1; - int truncBitPosition = 0; - size_t decPosition = input.find('.'); - for (; input[FirstNonZeroPos] < '1' || input[FirstNonZeroPos] > '9'; - FirstNonZeroPos++) - ; - - for (; input[LastNonZeroPos] < '1' || input[LastNonZeroPos] > '9'; - LastNonZeroPos--) - ; - - if (decPosition == std::string::npos) decPosition = inputLen; - // Count the valid number, to decide whether we need to truncate - if ((int)decPosition > LastNonZeroPos) { - if (LastNonZeroPos - FirstNonZeroPos + 1 <= precision) return input; - truncBitPosition = FirstNonZeroPos + precision; - } else if ((int)decPosition < FirstNonZeroPos) { // This is pure decimal - if (LastNonZeroPos - FirstNonZeroPos + 1 <= precision) { - if (FirstNonZeroPos - decPosition - 1 < 4) { - return input; - } else { - if (input[0] == '-') { - std::string tmp = input.substr(1, inputLen - 1); - return std::string("-") + scientificFormat(tmp); - } else - return scientificFormat(input); - } - } - truncBitPosition = FirstNonZeroPos + precision; - } else { - if (LastNonZeroPos - FirstNonZeroPos <= precision) return input; - truncBitPosition = FirstNonZeroPos + precision + 1; - } - - // duplicate the input string, we want to add "0" before the valid numbers - // This is easy for quantization, since we may change 9999 to 10000 - std::string ans = ""; - std::string dupInput = "0"; - if (input[0] == '-') { - ans += '-'; - dupInput += input.substr(1, inputLen - 1); - } else { - dupInput += input.substr(0, inputLen); - ++truncBitPosition; - } - - // Add 'carry' after truncation, if necessary - bool carry = 
dupInput[truncBitPosition] > '4'; - for (int i = truncBitPosition - 1; i >= 0 && carry; i--) { - if (dupInput[i] == '.') continue; - if (dupInput[i] == '9') - dupInput[i] = '0'; - else { - ++dupInput[i]; - carry = false; - } - } - - // bits outside precision range should be set to 0 - if (dupInput[0] == '1') - FirstNonZeroPos = 0; - else { - FirstNonZeroPos = 0; - while (dupInput[FirstNonZeroPos] < '1' || dupInput[FirstNonZeroPos] > '9') - ++FirstNonZeroPos; - } - - unsigned it = FirstNonZeroPos; - int NValidNumber = 0; - while (it < dupInput.length()) { - if (dupInput[it] == '.') { - ++it; - continue; - } - ++NValidNumber; - if (NValidNumber > precision) dupInput[it] = '0'; - ++it; - } - - // Here we wanted to adjust the truncate position and the value - decPosition = dupInput.find('.'); - if (decPosition == std::string::npos) // When this is integer - truncBitPosition = (int)dupInput.length(); - else - for (truncBitPosition = (int)(dupInput.length() - 1); truncBitPosition >= 0; - --truncBitPosition) { - if (dupInput[truncBitPosition] == '.') break; - if (dupInput[truncBitPosition] != '0') { - truncBitPosition++; - break; - } - } - - if (dupInput[0] == '1') - dupInput = dupInput.substr(0, truncBitPosition); - else - dupInput = dupInput.substr(1, truncBitPosition - 1); - - decPosition = dupInput.find('.'); - if (decPosition != std::string::npos) { - size_t it = 0; - for (it = decPosition + 1; dupInput[it] == '0'; it++) - ; - if (it - decPosition - 1 < 4) { - ans += dupInput; - return ans; - } else { - ans += scientificFormat(dupInput); - return ans; - } - } else if ((int)(dupInput.length()) <= precision) { - ans += dupInput; - return ans; - } - - ans += scientificFormat(dupInput); - return ans; -} - -template -INLINE void print( - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - if (_AP_I > 0) { - ap_int_base<_AP_I, _AP_S> p1; - p1.V = x.V >> (_AP_W - _AP_I); - print(p1.V); // print overlaod for .V should exit - } else { - printf("0"); - } - 
printf("."); - if (_AP_I < _AP_W) { - ap_int_base<_AP_W - _AP_I, false> p2; - p2.V = _AP_ROOT_op_get_range(x.V, 0, _AP_W - _AP_I); - print(p2.V, false); // print overlaod for .V should exit - } -} -#endif // ifndef __SYNTHESIS__ - -// XXX the following two functions have to exist in synthesis, -// as some old HLS Video Library code uses the ostream overload, -// although HLS will later delete I/O function call. - -/// Output streaming -//----------------------------------------------------------------------------- -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<( - std::ostream& out, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - // TODO support std::ios_base::fmtflags - unsigned width = out.width(); - unsigned precision = out.precision(); - char fill = out.fill(); - std::string str = x.to_string(10, _AP_S); - str = reduceToPrecision(str, precision); - if (width > str.length()) { - for (unsigned i = 0; i < width - str.length(); ++i) - out << fill; - } - out << str; - return out; -} -#endif // ifndef __SYNTHESIS__ - -/// Input streaming -// ----------------------------------------------------------------------------- -#ifndef __SYNTHESIS__ -template -INLINE std::istream& operator>>( - std::istream& in, - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - double d; - in >> d; - x = ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(d); - return in; -} -#endif -#endif // ifndef AP_AUTOCC - -/// Operators mixing Integers with ap_fixed_base -// ----------------------------------------------------------------------------- -#define AF_BIN_OP_WITH_INT_SF(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - 
return op.operator BIN_OP(ap_int_base<_AP_W2, _AP_S2>(i_op)); \ - } - -#define AF_BIN_OP_WITH_INT(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator BIN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - C_TYPE i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator BIN_OP(op); \ - } - -#define AF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator REL_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - template \ - INLINE bool operator REL_OP( \ - C_TYPE i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator REL_OP(op); \ - } - -#define AF_ASSIGN_OP_WITH_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ - operator ASSIGN_OP( \ - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator ASSIGN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } - -#define AF_ASSIGN_OP_WITH_INT_SF(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ - operator ASSIGN_OP( \ - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator ASSIGN_OP(ap_int_base<_AP_W2, _AP_S2>(i_op)); \ - } - -#define ALL_AF_OP_WITH_INT(C_TYPE, BITS, SIGN) \ - 
AF_BIN_OP_WITH_INT(+, C_TYPE, (BITS), (SIGN), plus) \ - AF_BIN_OP_WITH_INT(-, C_TYPE, (BITS), (SIGN), minus) \ - AF_BIN_OP_WITH_INT(*, C_TYPE, (BITS), (SIGN), mult) \ - AF_BIN_OP_WITH_INT(/, C_TYPE, (BITS), (SIGN), div) \ - AF_BIN_OP_WITH_INT(&, C_TYPE, (BITS), (SIGN), logic) \ - AF_BIN_OP_WITH_INT(|, C_TYPE, (BITS), (SIGN), logic) \ - AF_BIN_OP_WITH_INT(^, C_TYPE, (BITS), (SIGN), logic) \ - AF_BIN_OP_WITH_INT_SF(>>, C_TYPE, (BITS), (SIGN), lhs) \ - AF_BIN_OP_WITH_INT_SF(<<, C_TYPE, (BITS), (SIGN), lhs) \ - \ - AF_ASSIGN_OP_WITH_INT(+=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT(-=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT(*=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT(/=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT(&=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT(|=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT(^=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT_SF(>>=, C_TYPE, (BITS), (SIGN)) \ - AF_ASSIGN_OP_WITH_INT_SF(<<=, C_TYPE, (BITS), (SIGN)) \ - \ - AF_REL_OP_WITH_INT(>, C_TYPE, (BITS), (SIGN)) \ - AF_REL_OP_WITH_INT(<, C_TYPE, (BITS), (SIGN)) \ - AF_REL_OP_WITH_INT(>=, C_TYPE, (BITS), (SIGN)) \ - AF_REL_OP_WITH_INT(<=, C_TYPE, (BITS), (SIGN)) \ - AF_REL_OP_WITH_INT(==, C_TYPE, (BITS), (SIGN)) \ - AF_REL_OP_WITH_INT(!=, C_TYPE, (BITS), (SIGN)) - -ALL_AF_OP_WITH_INT(bool, 1, false) -ALL_AF_OP_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_AF_OP_WITH_INT(signed char, 8, true) -ALL_AF_OP_WITH_INT(unsigned char, 8, false) -ALL_AF_OP_WITH_INT(short, _AP_SIZE_short, true) -ALL_AF_OP_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_AF_OP_WITH_INT(int, _AP_SIZE_int, true) -ALL_AF_OP_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_AF_OP_WITH_INT(long, _AP_SIZE_long, true) -ALL_AF_OP_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_AF_OP_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_AF_OP_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef ALL_AF_OP_WITH_INT -#undef AF_BIN_OP_WITH_INT -#undef AF_BIN_OP_WITH_INT_SF 
-#undef AF_ASSIGN_OP_WITH_INT -#undef AF_ASSIGN_OP_WITH_INT_SF -#undef AF_REL_OP_WITH_INT - -/* - * ********************************************************************** - * TODO - * There is no operator defined with float/double/long double, so that - * code like - * ap_fixed<8,4> a = 1.5f; - * a += 0.5f; - * will fail in compilation. - * Operator with warning about conversion might be wanted. - * ********************************************************************** - */ - -#define AF_BIN_OP_WITH_AP_INT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>::template RType< \ - _AP_W, _AP_I, _AP_S>::RTYPE \ - operator BIN_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator BIN_OP(op); \ - } \ - \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ - return op.operator BIN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } - -#define AF_REL_OP_WITH_AP_INT(REL_OP) \ - template \ - INLINE bool operator REL_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ - return op.operator REL_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator REL_OP(op); \ - } - -#define AF_ASSIGN_OP_WITH_AP_INT(ASSIGN_OP) \ - template \ - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ - operator ASSIGN_OP( \ - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, 
_AP_S2>& i_op) { \ - return op.operator ASSIGN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - \ - template \ - INLINE ap_int_base<_AP_W2, _AP_S2>& operator ASSIGN_OP( \ - ap_int_base<_AP_W2, _AP_S2>& i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return i_op.operator ASSIGN_OP(op.to_ap_int_base()); \ - } - -AF_BIN_OP_WITH_AP_INT(+, plus) -AF_BIN_OP_WITH_AP_INT(-, minus) -AF_BIN_OP_WITH_AP_INT(*, mult) -AF_BIN_OP_WITH_AP_INT(/, div) -AF_BIN_OP_WITH_AP_INT(&, logic) -AF_BIN_OP_WITH_AP_INT(|, logic) -AF_BIN_OP_WITH_AP_INT(^, logic) - -#undef AF_BIN_OP_WITH_AP_INT - -AF_ASSIGN_OP_WITH_AP_INT(+=) -AF_ASSIGN_OP_WITH_AP_INT(-=) -AF_ASSIGN_OP_WITH_AP_INT(*=) -AF_ASSIGN_OP_WITH_AP_INT(/=) -AF_ASSIGN_OP_WITH_AP_INT(&=) -AF_ASSIGN_OP_WITH_AP_INT(|=) -AF_ASSIGN_OP_WITH_AP_INT(^=) - -#undef AF_ASSIGN_OP_WITH_AP_INT - -AF_REL_OP_WITH_AP_INT(==) -AF_REL_OP_WITH_AP_INT(!=) -AF_REL_OP_WITH_AP_INT(>) -AF_REL_OP_WITH_AP_INT(>=) -AF_REL_OP_WITH_AP_INT(<) -AF_REL_OP_WITH_AP_INT(<=) - -#undef AF_REL_OP_WITH_AP_INT - -// Relational Operators with double -template -INLINE bool operator==( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator==(op1); -} - -template -INLINE bool operator!=( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator!=(op1); -} - -template -INLINE bool operator>( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator<(op1); -} - -template -INLINE bool operator>=( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator<=(op1); -} - -template -INLINE bool operator<( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator>(op1); -} - -template -INLINE bool operator<=( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - 
return op2.operator>=(op1); -} - -#endif // ifndef __cplusplus else - -#endif // ifndef __AP_FIXED_BASE_H__ else - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_fixed_ref.h b/TrigScint/include/TrigScint/ap_fixed_ref.h deleted file mode 100644 index aefda0a67..000000000 --- a/TrigScint/include/TrigScint/ap_fixed_ref.h +++ /dev/null @@ -1,718 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_REF_H__ -#define __AP_FIXED_REF_H__ - -#ifndef __AP_FIXED_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." 
-#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" - -#else -#ifndef __SYNTHESIS__ -#include -#endif -/// Proxy class, which allows bit selection to be used as both rvalue (for -/// reading) and lvalue (for writing) -template -struct af_bit_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> ref_type; - ref_type& d_bv; - int d_index; - - public: - INLINE af_bit_ref( - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ref) - : d_bv(ref.d_bv), d_index(ref.d_index) { -#ifndef __SYNTHESIS__ - _AP_WARNING(d_index < 0, "Index of bit vector (%d) cannot be negative.", - d_index); - _AP_WARNING(d_index >= _AP_W, "Index of bit vector (%d) out of range (%d).", - d_index, _AP_W); -#endif - } - - INLINE af_bit_ref(ref_type* bv, int index = 0) : d_bv(*bv), d_index(index) {} - - INLINE af_bit_ref(const ref_type* bv, int index = 0) - : d_bv(*const_cast(bv)), d_index(index) {} - - /// convert operators. - INLINE operator bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - /// @name assign operators - // @{ - INLINE af_bit_ref& operator=(bool val) { - d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); - return *this; - } - - // Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. 
- INLINE af_bit_ref& operator=(const af_bit_ref& val) { - return operator=(bool(val)); - } - - template - INLINE af_bit_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(bool(val)); - } - - template - INLINE af_bit_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=(bool(val)); - } - - template - INLINE af_bit_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { - return operator=(val != 0); - } - - template - INLINE af_bit_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { - return operator=(ap_int_base<_AP_W2, false>(val)); - } - - template - INLINE af_bit_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(ap_int_base<_AP_W2, false>(val)); - } - - template - INLINE af_bit_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - return operator=(ap_int_base<_AP_W2 + _AP_W3, false>(val)); - } - // @} - - /// @name concatenate operators - // @{ - template - INLINE ap_concat_ref<1, af_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<1, af_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > operator,( - const ap_bit_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<1, af_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >(*this, - op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<1, af_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> >( - *this, op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &op) { - return ap_concat_ref<1, af_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, 
_AP_T2, _AP_W3, _AP_T3> >(*this, - op); - } - - template - INLINE ap_concat_ref< - 1, af_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { - return ap_concat_ref< - 1, af_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, - op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { - return ap_concat_ref<1, af_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - op)); - } - // @} - - /// @name comparison - // @{ - template - INLINE bool operator==( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - return get() == op.get(); - } - - template - INLINE bool operator!=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - return get() != op.get(); - } - // @} - - INLINE bool operator~() const { - bool bit = _AP_ROOT_op_get_bit(d_bv.V, d_index); - return bit ? false : true; - } - - INLINE bool get() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - INLINE int length() const { return 1; } - -#ifndef __SYNTHESIS__ - std::string to_string() const { return get() ? "1" : "0"; } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string() const { return 0; } -#endif -}; // struct af_bit_ref - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<( - std::ostream& os, - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - os << x.to_string(); - return os; -} -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_AUTOCC - -/// Range (slice) reference. 
-template -struct af_range_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> ref_type; - ref_type& d_bv; - int l_index; - int h_index; - - public: - /// copy ctor - INLINE af_range_ref( - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ref) - : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} - - /// ctor from ap_fixed_base, higher and lower bound. - /** if h is less than l, the bits selected will be returned in reverse order. - */ - INLINE af_range_ref(ref_type* bv, int h, int l) - : d_bv(*bv), l_index(l), h_index(h) { -#ifndef __SYNTHESIS__ - _AP_WARNING(h < 0 || l < 0, - "Higher bound(%d) and lower(%d) bound cannot be negative.", h, - l); - _AP_WARNING(h >= _AP_W || l >= _AP_W, - "Higher bound(%d) or lower(%d) bound out of range.", h, l); - _AP_WARNING(h < l, "The bits selected will be returned in reverse order."); -#endif - } - - INLINE af_range_ref(const ref_type* bv, int h, int l) - : d_bv(*const_cast(bv)), l_index(l), h_index(h) { -#ifndef __SYNTHESIS__ - _AP_WARNING(h < 0 || l < 0, - "Higher bound(%d) and lower(%d) bound cannot be negative.", h, - l); - _AP_WARNING(h >= _AP_W || l >= _AP_W, - "Higher bound(%d) or lower(%d) bound out of range.", h, l); - _AP_WARNING(h < l, "The bits selected will be returned in reverse order."); -#endif - } - - /// @name assign operators - // @{ - -#define ASSIGN_CTYPE_TO_AF_RANGE(DATA_TYPE) \ - INLINE af_range_ref& operator=(const DATA_TYPE val) { \ - ap_int_base<_AP_W, false> loc(val); \ - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, loc.V); \ - return *this; \ - } - - ASSIGN_CTYPE_TO_AF_RANGE(bool) - ASSIGN_CTYPE_TO_AF_RANGE(char) - ASSIGN_CTYPE_TO_AF_RANGE(signed char) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned char) - ASSIGN_CTYPE_TO_AF_RANGE(short) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned short) - ASSIGN_CTYPE_TO_AF_RANGE(int) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned int) - ASSIGN_CTYPE_TO_AF_RANGE(long) - 
ASSIGN_CTYPE_TO_AF_RANGE(unsigned long) - ASSIGN_CTYPE_TO_AF_RANGE(ap_slong) - ASSIGN_CTYPE_TO_AF_RANGE(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_CTYPE_TO_AF_RANGE(half) -#endif - ASSIGN_CTYPE_TO_AF_RANGE(float) - ASSIGN_CTYPE_TO_AF_RANGE(double) -#undef ASSIGN_CTYPE_TO_AF_RANGE - - /// assgin using a string. XXX crucial for cosim. - INLINE af_range_ref& operator=(const char* val) { - const ap_int_base<_AP_W, false> tmp(val); // XXX figure out radix - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - return *this; - } - - /// assign from ap_int_base. - // NOTE Base of other assgin operators. - template - INLINE af_range_ref& operator=(const ap_int_base<_AP_W3, _AP_S3>& val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - return *this; - } - - /// assign from range reference to ap_int_base. - template - INLINE af_range_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { - const ap_int_base<_AP_W2, false> tmp(val); - return operator=(tmp); - } - - /// assign from bit reference to ap_int_base.. - template - INLINE af_range_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - const ap_int_base<1, false> tmp((bool)val); - return operator=(tmp); - } - - /// assgin from ap_fixed_base. - template - INLINE af_range_ref& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - return *this; - } - - /// copy assgin. - // XXX This has to be explicit, otherwise it will be deleted, as d_bv is - // of reference type. - INLINE af_range_ref& operator=(const af_range_ref& val) { - ap_int_base<_AP_W, false> tmp(val); - return operator=(tmp); - } - - /// assign from range reference to ap_fixed_base. 
- template - INLINE af_range_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - ap_int_base<_AP_W2, false> tmp(val); - return operator=(tmp); - } - - /// assign from bit reference to ap_fixed_base. - template - INLINE af_range_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - ap_int_base<1, false> tmp((bool)val); - return operator=(tmp); - } - - /// assign from compound reference. - template - INLINE af_range_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - const ap_int_base<_AP_W2 + _AP_W3, false> tmp(val); - return operator=(tmp); - } - // @} - - /// @name comparison operators with ap_range_ref. - // @{ - template - INLINE bool operator==(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop == rop; - } - - template - INLINE bool operator!=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator==(op2)); - } - - template - INLINE bool operator<(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop < rop; - } - - template - INLINE bool operator>(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop > rop; - } - - template - INLINE bool operator<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator>(op2)); - } - - template - INLINE bool operator>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator<(op2)); - } - // @} - - /// @name comparison operators with af_range_ref. 
- // @{ - template - INLINE bool operator==( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop == rop; - } - - template - INLINE bool operator!=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - return !(operator==(op2)); - } - - template - INLINE bool operator<( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop < rop; - } - - template - INLINE bool operator>( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop > rop; - } - - template - INLINE bool operator<=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - return !(operator>(op2)); - } - - template - INLINE bool operator>=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - return !(operator<(op2)); - } - // @} - - /// @name concatenate operators. - /// @{ - /// concatenate with ap_int_base. - template - INLINE - ap_concat_ref<_AP_W, af_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<_AP_W, af_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, op); - } - - /// concatenate with ap_bit_ref. - template - INLINE ap_concat_ref<_AP_W, af_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(const ap_bit_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<_AP_W, af_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - /// concatenate with ap_bit_ref. 
- template - INLINE ap_concat_ref<_AP_W, af_range_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<_AP_W, af_range_ref, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - /// concatenate with ap_concat_ref. - template - INLINE ap_concat_ref<_AP_W, af_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &op) { - return ap_concat_ref<_AP_W, af_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - *this, const_cast&>(op)); - } - - /// concatenate with another af_range_ref. - template - INLINE - ap_concat_ref<_AP_W, af_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &op) { - return ap_concat_ref< - _AP_W, af_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - op)); - } - - /// concatenate with another af_bit_ref. 
- template - INLINE - ap_concat_ref<_AP_W, af_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { - return ap_concat_ref< - _AP_W, af_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - op)); - } - // @} - - INLINE operator ap_ulong() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret.to_uint64(); - } - - INLINE operator ap_int_base<_AP_W, false>() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - INLINE ap_int_base<_AP_W, false> to_ap_int_base() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - // used in ap_fixed_base::to_string() - INLINE char to_char() const { - return (char)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE int to_int() const { - return (int)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned to_uint() const { - return (unsigned)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE long to_long() const { - return (long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned long to_ulong() const { - return (unsigned long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_slong to_int64() const { - return (ap_slong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_ulong to_uint64() const { - return (ap_ulong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_int_base<_AP_W, false> get() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - template - INLINE void set(const ap_int_base<_AP_W2, false>& val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - } - - INLINE int 
length() const { - return h_index >= l_index ? h_index - l_index + 1 : l_index - h_index + 1; - } - -#ifndef __SYNTHESIS__ - std::string to_string(signed char rd = 2) const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret.to_string(rd); - } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string(signed char rd = 2) const { - return 0; - } -#endif -}; // struct af_range_ref - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<( - std::ostream& os, - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - os << x.to_string(); - return os; -} -#endif -#endif // ifndef AP_AUTOCC - -#define AF_REF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP( \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE op2) { \ - return ap_int_base<_AP_W, false>(op) \ - REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - C_TYPE op2, \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(op2) \ - REL_OP ap_int_base<_AP_W, false>(op); \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE op2) { \ - return bool(op) REL_OP op2; \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - C_TYPE op2, \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return op2 REL_OP bool(op); \ - } - -#define AF_REF_REL_OPS_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - AF_REF_REL_OP_WITH_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ 
- AF_REF_REL_OP_WITH_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) - -AF_REF_REL_OPS_WITH_INT(bool, 1, false) -AF_REF_REL_OPS_WITH_INT(char, 8, CHAR_IS_SIGNED) -AF_REF_REL_OPS_WITH_INT(signed char, 8, true) -AF_REF_REL_OPS_WITH_INT(unsigned char, 8, false) -AF_REF_REL_OPS_WITH_INT(short, _AP_SIZE_short, true) -AF_REF_REL_OPS_WITH_INT(unsigned short, _AP_SIZE_short, false) -AF_REF_REL_OPS_WITH_INT(int, _AP_SIZE_int, true) -AF_REF_REL_OPS_WITH_INT(unsigned int, _AP_SIZE_int, false) -AF_REF_REL_OPS_WITH_INT(long, _AP_SIZE_long, true) -AF_REF_REL_OPS_WITH_INT(unsigned long, _AP_SIZE_long, false) -AF_REF_REL_OPS_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -AF_REF_REL_OPS_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef AF_REF_REL_OP_INT -#undef AF_REF_REL_OPS_WITH_INT - -#define AF_REF_REL_OP_WITH_AP_INT(REL_OP) \ - template \ - INLINE bool operator REL_OP( \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S>& op2) { \ - return ap_int_base<_AP_W, false>(op) REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& op2, \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return op2 REL_OP ap_int_base<_AP_W, false>(op); \ - } \ - template \ - INLINE bool operator REL_OP( \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<1, false>(op) REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& op2, \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return op2 REL_OP ap_int_base<1, false>(op); \ - } - -AF_REF_REL_OP_WITH_AP_INT(>) -AF_REF_REL_OP_WITH_AP_INT(<) -AF_REF_REL_OP_WITH_AP_INT(>=) -AF_REF_REL_OP_WITH_AP_INT(<=) -AF_REF_REL_OP_WITH_AP_INT(==) -AF_REF_REL_OP_WITH_AP_INT(!=) - -#endif // ifndef __cplusplus - -#endif // ifndef __AP_FIXED_REF_H__ - -// -*- cpp -*- diff --git 
a/TrigScint/include/TrigScint/ap_fixed_special.h b/TrigScint/include/TrigScint/ap_fixed_special.h deleted file mode 100644 index 0f7a9f7eb..000000000 --- a/TrigScint/include/TrigScint/ap_fixed_special.h +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_SPECIAL_H__ -#define __AP_FIXED_SPECIAL_H__ - -#ifndef __AP_FIXED_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __SYNTHESIS__ -#include -#include -#endif -// FIXME AP_AUTOCC cannot handle many standard headers, so declare instead of -// include. -// #include -namespace std { -template class complex; -} - -/* - TODO: Modernize the code using C++11/C++14 - 1. constexpr http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0415r0.html - 2. move constructor -*/ - -namespace std { -/* - Specialize std::complex to zero initialization ap_fixed. - - To reduce the area cost, ap_fixed is not zero initialized, just like basic - types float or double. However, libstdc++ provides specialization for float, - double and long double, initializing image part to 0 when not specified. - - This has become a difficulty in switching legacy code from these C types to - ap_fixed. To ease the tranform of legacy code, we have to implement - specialization of std::complex<> for our type. 
- - As ap_fixed is a template, it is impossible to specialize only the methods - that causes default initialization of value type in std::complex<>. An - explicit full specialization of the template class has to be done, covering - all the member functions and operators of std::complex<> as specified - in standard 26.2.4 and 26.2.5. -*/ -template -class complex > { - public: - typedef ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> _Tp; - typedef _Tp value_type; - - // 26.2.4/1 - // Constructor without argument - // Default initialize, so that in dataflow, the variable is only written once. - complex() : _M_real(_Tp()), _M_imag(_Tp()) {} - // Constructor with ap_fixed. - // Zero initialize image part when not specified, so that `C(1) == C(1,0)` - complex(const _Tp &__r, const _Tp &__i = _Tp(0)) - : _M_real(__r), _M_imag(__i) {} - - // Constructor with another complex number - template - complex(const complex<_Up> &__z) : _M_real(__z.real()), _M_imag(__z.imag()) {} - -#if __cplusplus >= 201103L - const _Tp& real() const { return _M_real; } - const _Tp& imag() const { return _M_imag; } -#else - _Tp& real() { return _M_real; } - const _Tp& real() const { return _M_real; } - _Tp& imag() { return _M_imag; } - const _Tp& imag() const { return _M_imag; } -#endif - - void real(_Tp __val) { _M_real = __val; } - - void imag(_Tp __val) { _M_imag = __val; } - - // Assign this complex number with ap_fixed. - // Zero initialize image poarrt, so that `C c; c = 1; c == C(1,0);` - complex<_Tp> &operator=(const _Tp __t) { - _M_real = __t; - _M_imag = _Tp(0); - return *this; - } - - // 26.2.5/1 - // Add ap_fixed to this complex number. - complex<_Tp> &operator+=(const _Tp &__t) { - _M_real += __t; - return *this; - } - - // 26.2.5/3 - // Subtract ap_fixed from this complex number. - complex<_Tp> &operator-=(const _Tp &__t) { - _M_real -= __t; - return *this; - } - - // 26.2.5/5 - // Multiply this complex number by ap_fixed. 
- complex<_Tp> &operator*=(const _Tp &__t) { - _M_real *= __t; - _M_imag *= __t; - return *this; - } - - // 26.2.5/7 - // Divide this complex number by ap_fixed. - complex<_Tp> &operator/=(const _Tp &__t) { - _M_real /= __t; - _M_imag /= __t; - return *this; - } - - // Assign complex number to this complex number. - template - complex<_Tp> &operator=(const complex<_Up> &__z) { - _M_real = __z.real(); - _M_imag = __z.imag(); - return *this; - } - - // 26.2.5/9 - // Add complex number to this. - template - complex<_Tp> &operator+=(const complex<_Up> &__z) { - _M_real += __z.real(); - _M_imag += __z.imag(); - return *this; - } - - // 26.2.5/11 - // Subtract complex number from this. - template - complex<_Tp> &operator-=(const complex<_Up> &__z) { - _M_real -= __z.real(); - _M_imag -= __z.imag(); - return *this; - } - - // 26.2.5/13 - // Multiply this by complex number. - template - complex<_Tp> &operator*=(const complex<_Up> &__z) { - const _Tp __r = _M_real * __z.real() - _M_imag * __z.imag(); - _M_imag = _M_real * __z.imag() + _M_imag * __z.real(); - _M_real = __r; - return *this; - } - - // 26.2.5/15 - // Divide this by complex number. - template - complex<_Tp> &operator/=(const complex<_Up> &__z) { - complex<_Tp> cj (__z.real(), -__z.imag()); - complex<_Tp> a = (*this) * cj; - complex<_Tp> b = cj * __z; - _M_real = a.real() / b.real(); - _M_imag = a.imag() / b.real(); - return *this; - } - - private: - _Tp _M_real; - _Tp _M_imag; - -}; // class complex > - -/* - Non-member operations - These operations are not required by standard in 26.2.6, but libstdc++ - defines them for - float, double or long double's specialization. -*/ -// Compare complex number with ap_fixed. -template -inline bool operator==( - const complex > &__x, - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__y) { - return __x.real() == __y && - __x.imag() == 0; -} - -// Compare ap_fixed with complex number. 
-template -inline bool operator==( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__x, - const complex > &__y) { - return __x == __y.real() && - 0 == __y.imag(); -} - -// Compare complex number with ap_fixed. -template -inline bool operator!=( - const complex > &__x, - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__y) { - return __x.real() != __y || - __x.imag() != 0; -} - -// Compare ap_fixed with complex number. -template -inline bool operator!=( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__x, - const complex > &__y) { - return __x != __y.real() || - 0 != __y.imag(); -} - -} // namespace std - -#endif // ifndef __AP_FIXED_SPECIAL_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int.h b/TrigScint/include/TrigScint/ap_int.h deleted file mode 100644 index fbdc9c413..000000000 --- a/TrigScint/include/TrigScint/ap_int.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_H__ -#define __AP_INT_H__ - -#include -#include -#include - -//--------------------------------------------------------------- - -/// Sign Arbitrary Precision Type. 
-template -struct ap_int : ap_int_base<_AP_W, true> { - typedef ap_int_base<_AP_W, true> Base; - // Constructor - INLINE ap_int() : Base() {} - - // Copy ctor - INLINE ap_int(const ap_int& op) { Base::V = op.V; } - - template - INLINE ap_int(const ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const volatile ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const volatile ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const ap_range_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_int(const ap_bit_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_int(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) - : Base(ref) {} - - template - INLINE ap_int(const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_int(const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_int( - const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_int( - const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_int(const ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_int( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_int( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -#define CTOR(TYPE) \ - 
INLINE ap_int(TYPE val) { Base::V = val; } - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#undef CTOR - ap_int(double val) : Base(val) {} - ap_int(float val) : Base(val) {} -#if _AP_ENABLE_HALF_ == 1 - ap_int(half val) : Base(val) {} -#endif - - // ap_int_base will guess radix if radix is not provided. - INLINE ap_int(const char* s) : Base(s) {} - - INLINE ap_int(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - /* ctor will be used when right is not of proper type. */ - - INLINE ap_int& operator=(const ap_int<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot bind volatile reference to non-volatile type. */ - INLINE ap_int& operator=(const volatile ap_int<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot return volatile *this. */ - INLINE void operator=(const ap_int<_AP_W>& op2) volatile { Base::V = op2.V; } - - INLINE void operator=(const volatile ap_int<_AP_W>& op2) volatile { - Base::V = op2.V; - } - -}; // struct ap_int. - -//--------------------------------------------------------------- - -/// Unsigned Arbitrary Precision Type. 
-template -struct ap_uint : ap_int_base<_AP_W, false> { - typedef ap_int_base<_AP_W, false> Base; - // Constructor - INLINE ap_uint() : Base() {} - - // Copy ctor - INLINE ap_uint(const ap_uint& op) { Base::V = op.V; } - - template - INLINE ap_uint(const ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const volatile ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const volatile ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const ap_range_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_uint(const ap_bit_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_uint(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) - : Base(ref) {} - - template - INLINE ap_uint(const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_uint(const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_uint( - const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_uint( - const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_uint(const ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_uint( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_uint( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - 
-#define CTOR(TYPE) \ - INLINE ap_uint(TYPE val) { Base::V = val; } - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#undef CTOR - ap_uint(double val) : Base(val) {} - ap_uint(float val) : Base(val) {} -#if _AP_ENABLE_HALF_ == 1 - ap_uint(half val) : Base(val) {} -#endif - - // ap_int_base will guess radix if radix is not provided. - INLINE ap_uint(const char* s) : Base(s) {} - - INLINE ap_uint(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - /* XXX ctor will be used when right is not of proper type. */ - - INLINE ap_uint& operator=(const ap_uint<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot bind volatile reference to non-volatile type. */ - INLINE ap_uint& operator=(const volatile ap_uint<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot return volatile *this. */ - INLINE void operator=(const ap_uint<_AP_W>& op2) volatile { Base::V = op2.V; } - - INLINE void operator=(const volatile ap_uint<_AP_W>& op2) volatile { - Base::V = op2.V; - } - -}; // struct ap_uint. - -#define ap_bigint ap_int -#define ap_biguint ap_uint - -#if !defined(__SYNTHESIS__) && (defined(SYSTEMC_H) || defined(SYSTEMC_INCLUDED)) -// XXX sc_trace overload for ap_fixed is already included in -// "ap_sysc/ap_sc_extras.h", so do not define in synthesis. -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, const ap_int<_AP_W>& op, - const std::string& name) { - if (tf) tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} - -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, const ap_uint<_AP_W>& op, - const std::string& name) { - if (tf) tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} -#endif // System C sim - -#include - -#endif // ifndef __AP_INT_H__ else - -// FIXME user should include ap_fixed.h when using ap_fixed. 
-// to avoid circular inclusion, must check whether this is required by -// ap_fixed.h -#ifndef __AP_FIXED_H__ -#include -#endif - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int_base.h b/TrigScint/include/TrigScint/ap_int_base.h deleted file mode 100644 index 795d20717..000000000 --- a/TrigScint/include/TrigScint/ap_int_base.h +++ /dev/null @@ -1,1885 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_BASE_H__ -#define __AP_INT_BASE_H__ - -#ifndef __AP_INT_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" -#else - -#include -#ifndef __SYNTHESIS__ -#if _AP_ENABLE_HALF_ == 1 -#include -#endif -#include -#include -#endif - -/* ---------------------------------------------------------------- - * ap_int_base: AutoPilot integer/Arbitrary precision integer. - * ---------------------------------------------------------------- - */ - -/* helper trait. Selecting the smallest C type that can hold the value, - * return 64 bit C type if not possible. 
- */ -template -struct retval; - -// at least 64 bit -template -struct retval<_AP_N, true> { - typedef ap_slong Type; -}; - -template -struct retval<_AP_N, false> { - typedef ap_ulong Type; -}; - -// at least 8 bit -template <> -struct retval<1, true> { - typedef signed char Type; -}; - -template <> -struct retval<1, false> { - typedef unsigned char Type; -}; - -// at least 16 bit -template <> -struct retval<2, true> { - typedef short Type; -}; - -template <> -struct retval<2, false> { - typedef unsigned short Type; -}; - -// at least 32 bit -template <> -struct retval<3, true> { - typedef long Type; -}; - -template <> -struct retval<3, false> { - typedef unsigned long Type; -}; - -template <> -struct retval<4, true> { - typedef long Type; -}; - -template <> -struct retval<4, false> { - typedef unsigned long Type; -}; - -// trait for letting base class to return derived class. -// Notice that derived class template is incomplete, and we cannot use -// the member of the derived class. -template -struct _ap_int_factory; -template -struct _ap_int_factory<_AP_W2,true> { typedef ap_int<_AP_W2> type; }; -template -struct _ap_int_factory<_AP_W2,false> { typedef ap_uint<_AP_W2> type; }; - -template -struct ap_int_base : public _AP_ROOT_TYPE<_AP_W, _AP_S> { - public: - typedef _AP_ROOT_TYPE<_AP_W, _AP_S> Base; - - /* ap_int_base<_AP_W, _AP_S, true> - * typedef typename retval<(_AP_W + 7) / 8, _AP_S>::Type RetType; - * - * ap_int_base<_AP_W, _AP_S, false> - * typedef typename retval<8, _AP_S>::Type RetType; - */ - typedef typename retval::Type RetType; - - static const int width = _AP_W; - - template - struct RType { - enum { - mult_w = _AP_W + _AP_W2, - mult_s = _AP_S || _AP_S2, - plus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, - div_w = _AP_W + _AP_S2, - div_s = _AP_S || _AP_S2, - mod_w = AP_MIN(_AP_W, _AP_W2 + 
(!_AP_S2 && _AP_S)), - mod_s = _AP_S, - logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - - - typedef ap_int_base mult_base; - typedef ap_int_base plus_base; - typedef ap_int_base minus_base; - typedef ap_int_base logic_base; - typedef ap_int_base div_base; - typedef ap_int_base mod_base; - typedef ap_int_base<_AP_W, _AP_S> arg1_base; - - typedef typename _ap_int_factory::type mult; - typedef typename _ap_int_factory::type plus; - typedef typename _ap_int_factory::type minus; - typedef typename _ap_int_factory::type logic; - typedef typename _ap_int_factory::type div; - typedef typename _ap_int_factory::type mod; - typedef typename _ap_int_factory<_AP_W, _AP_S>::type arg1; - typedef bool reduce; - }; - - /* Constructors. - * ---------------------------------------------------------------- - */ - /// default ctor - INLINE ap_int_base() { - /* - #ifdef __SC_COMPATIBLE__ - Base::V = 0; - #endif - */ - } - - /// copy ctor - template - INLINE ap_int_base(const ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - - /// volatile copy ctor - template - INLINE ap_int_base(const volatile ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - -// XXX C++11 feature. -// The explicit specifier specifies that a constructor or conversion function -// (since C++11) doesn't allow implicit conversions or copy-initialization. -// ap_int_base x = 1; -// ap_int_base foo() { return 1; } -// but allows -// ap_int_base x(1); -// ap_int_base y {1}; - -/// from all c types. 
-#define CTOR_FROM_INT(Type, Size, Signed) \ - INLINE ap_int_base(const Type op) { Base::V = op; } - - CTOR_FROM_INT(bool, 1, false) - CTOR_FROM_INT(char, 8, CHAR_IS_SIGNED) - CTOR_FROM_INT(signed char, 8, true) - CTOR_FROM_INT(unsigned char, 8, false) - CTOR_FROM_INT(short, _AP_SIZE_short, true) - CTOR_FROM_INT(unsigned short, _AP_SIZE_short, false) - CTOR_FROM_INT(int, _AP_SIZE_int, true) - CTOR_FROM_INT(unsigned int, _AP_SIZE_int, false) - CTOR_FROM_INT(long, _AP_SIZE_long, true) - CTOR_FROM_INT(unsigned long, _AP_SIZE_long, false) - CTOR_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) - CTOR_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) -#undef CTOR_FROM_INT - -#if _AP_ENABLE_HALF_ == 1 - /// ctor from half. - // TODO optimize - INLINE ap_int_base(half op) { - ap_int_base<_AP_W, _AP_S> t((float)op); - Base::V = t.V; - } -#endif - - /// ctor from float. - INLINE ap_int_base(float op) { - const int BITS = FLOAT_MAN + FLOAT_EXP + 1; - ap_int_base reg; - reg.V = floatToRawBits(op); - bool is_neg = _AP_ROOT_op_get_bit(reg.V, BITS - 1); - - ap_int_base exp = 0; - exp.V = _AP_ROOT_op_get_range(reg.V, FLOAT_MAN, BITS - 2); - exp = exp - FLOAT_BIAS; - - ap_int_base man; - man.V = _AP_ROOT_op_get_range(reg.V, 0, FLOAT_MAN - 1); - // check for NaN - _AP_WARNING(exp == ((unsigned char)(FLOAT_BIAS + 1)) && man.V != 0, - "assign NaN to ap integer value"); - // set leading 1. - man.V = _AP_ROOT_op_set_bit(man.V, FLOAT_MAN, 1); - //if (is_neg) man = -man; - - if ((reg.V & 0x7ffffffful) == 0) { - Base::V = 0; - } else { - int sh_amt = FLOAT_MAN - exp.V; - if (sh_amt == 0) { - Base::V = man.V; - } else if (sh_amt > 0) { - if (sh_amt < FLOAT_MAN + 2) { - Base::V = man.V >> sh_amt; - } else { - if (is_neg) - Base::V = -1; - else - Base::V = 0; - } - } else { - sh_amt = -sh_amt; - if (sh_amt < _AP_W) { - Base::V = man.V; - Base::V <<= sh_amt; - } else { - Base::V = 0; - } - } - } - if (is_neg) *this = -(*this); - } - - /// ctor from double. 
- INLINE ap_int_base(double op) { - const int BITS = DOUBLE_MAN + DOUBLE_EXP + 1; - ap_int_base reg; - reg.V = doubleToRawBits(op); - bool is_neg = _AP_ROOT_op_get_bit(reg.V, BITS - 1); - - ap_int_base exp = 0; - exp.V = _AP_ROOT_op_get_range(reg.V, DOUBLE_MAN, BITS - 2); - exp = exp - DOUBLE_BIAS; - - ap_int_base man; - man.V = _AP_ROOT_op_get_range(reg.V, 0, DOUBLE_MAN - 1); - // check for NaN - _AP_WARNING(exp == ((unsigned char)(DOUBLE_BIAS + 1)) && man.V != 0, - "assign NaN to ap integer value"); - // set leading 1. - man.V = _AP_ROOT_op_set_bit(man.V, DOUBLE_MAN, 1); - //if (is_neg) man = -man; - - if ((reg.V & 0x7fffffffffffffffull) == 0) { - Base::V = 0; - } else { - int sh_amt = DOUBLE_MAN - exp.V; - if (sh_amt == 0) { - Base::V = man.V; - } else if (sh_amt > 0) { - if (sh_amt < DOUBLE_MAN + 2) { - Base::V = man.V >> sh_amt; - } else { - if (is_neg) - Base::V = -1; - else - Base::V = 0; - } - } else { - sh_amt = -sh_amt; - if (sh_amt < _AP_W) { - Base::V = man.V; - Base::V <<= sh_amt; - } else { - Base::V = 0; - } - } - } - if (is_neg) *this = -(*this); - } - - /// from higer rank type. - template - INLINE ap_int_base( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = op.to_ap_int_base().V; - } - - template - INLINE ap_int_base(const ap_range_ref<_AP_W2, _AP_S2>& ref) { - Base::V = (ref.get()).V; - } - - template - INLINE ap_int_base(const ap_bit_ref<_AP_W2, _AP_S2>& ref) { - Base::V = ref.operator bool(); - } - - template - INLINE ap_int_base(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { - const ap_int_base::_AP_WR, - false> - tmp = ref.get(); - Base::V = tmp.V; - } - - /* radix has default value in set */ - -#ifndef __SYNTHESIS__ - INLINE ap_int_base(const char* s, signed char rd = 0) { - if (rd == 0) - rd = guess_radix(s); - unsigned int length = strlen(s); - Base::V.fromString(s, length, rd); - } -#else - // XXX __builtin_bit_from_string(...) requires const C string and radix. 
- INLINE ap_int_base(const char* s) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), 10, _AP_W, _AP_S, - AP_TRN, AP_WRAP, 0, _AP_C99); - Base::V = t; - } - INLINE ap_int_base(const char* s, signed char rd) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), rd, _AP_W, _AP_S, - AP_TRN, AP_WRAP, 0, _AP_C99); - Base::V = t; - } -#endif - - template - INLINE ap_int_base( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - Base::V = (val.get()).V; - } - - template - INLINE ap_int_base( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - Base::V = val.operator bool(); - } - - INLINE ap_int_base read() volatile { - /*AP_DEBUG(printf("call read %d\n", Base::V););*/ - ap_int_base ret; - ret.V = Base::V; - return ret; - } - - INLINE void write(const ap_int_base<_AP_W, _AP_S>& op2) volatile { - /*AP_DEBUG(printf("call write %d\n", op2.V););*/ - Base::V = op2.V; - } - - /* Another form of "write".*/ - template - INLINE void operator=( - const volatile ap_int_base<_AP_W2, _AP_S2>& op2) volatile { - Base::V = op2.V; - } - - INLINE void operator=( - const volatile ap_int_base<_AP_W, _AP_S>& op2) volatile { - Base::V = op2.V; - } - - template - INLINE void operator=(const ap_int_base<_AP_W2, _AP_S2>& op2) volatile { - Base::V = op2.V; - } - - INLINE void operator=(const ap_int_base<_AP_W, _AP_S>& op2) volatile { - Base::V = op2.V; - } - - template - INLINE ap_int_base& operator=( - const volatile ap_int_base<_AP_W2, _AP_S2>& op2) { - Base::V = op2.V; - return *this; - } - - template - INLINE ap_int_base& operator=(const ap_int_base<_AP_W2, _AP_S2>& op2) { - Base::V = op2.V; - return *this; - } - - INLINE ap_int_base& operator=(const volatile ap_int_base<_AP_W, _AP_S>& op2) { - Base::V = op2.V; - return *this; - } - - INLINE ap_int_base& operator=(const ap_int_base<_AP_W, _AP_S>& op2) { - Base::V = op2.V; - return *this; - } - - -#define ASSIGN_OP_FROM_INT(Type, Size, Signed) \ - 
INLINE ap_int_base& operator=(Type op) { \ - Base::V = op; \ - return *this; \ - } - - ASSIGN_OP_FROM_INT(bool, 1, false) - ASSIGN_OP_FROM_INT(char, 8, CHAR_IS_SIGNED) - ASSIGN_OP_FROM_INT(signed char, 8, true) - ASSIGN_OP_FROM_INT(unsigned char, 8, false) - ASSIGN_OP_FROM_INT(short, _AP_SIZE_short, true) - ASSIGN_OP_FROM_INT(unsigned short, _AP_SIZE_short, false) - ASSIGN_OP_FROM_INT(int, _AP_SIZE_int, true) - ASSIGN_OP_FROM_INT(unsigned int, _AP_SIZE_int, false) - ASSIGN_OP_FROM_INT(long, _AP_SIZE_long, true) - ASSIGN_OP_FROM_INT(unsigned long, _AP_SIZE_long, false) - ASSIGN_OP_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) - ASSIGN_OP_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef ASSIGN_OP_FROM_INT - - template - INLINE ap_int_base& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& op2) { - Base::V = (bool)op2; - return *this; - } - - template - INLINE ap_int_base& operator=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - Base::V = (ap_int_base<_AP_W2, false>(op2)).V; - return *this; - } - - template - INLINE ap_int_base& operator=( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op2) { - Base::V = op2.get().V; - return *this; - } - - template - INLINE ap_int_base& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = op.to_ap_int_base().V; - return *this; - } - - template - INLINE ap_int_base& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = (bool)op; - return *this; - } - - template - INLINE ap_int_base& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = ((const ap_int_base<_AP_W2, false>)(op)).V; - return *this; - } - - // FIXME: UG902 has clearly required user to use to_int() to convert to built-in - // types, but this implicit conversion is relied on in hls_cordic.h and hls_rsr.h. 
- // For example: - // int d_exp = fps_x.exp - fps_y.exp; - INLINE operator RetType() const { return (RetType)(Base::V); } - - /* Explicit conversions to C types. - * ---------------------------------------------------------------- - */ - INLINE bool to_bool() const { return (bool)(Base::V); } - INLINE char to_char() const { return (char)(Base::V); } - INLINE signed char to_schar() const { return (signed char)(Base::V); } - INLINE unsigned char to_uchar() const { return (unsigned char)(Base::V); } - INLINE short to_short() const { return (short)(Base::V); } - INLINE unsigned short to_ushort() const { return (unsigned short)(Base::V); } - INLINE int to_int() const { return (int)(Base::V); } - INLINE unsigned to_uint() const { return (unsigned)(Base::V); } - INLINE long to_long() const { return (long)(Base::V); } - INLINE unsigned long to_ulong() const { return (unsigned long)(Base::V); } - INLINE ap_slong to_int64() const { return (ap_slong)(Base::V); } - INLINE ap_ulong to_uint64() const { return (ap_ulong)(Base::V); } - INLINE float to_float() const { return (float)(Base::V); } - INLINE double to_double() const { return (double)(Base::V); } - - // TODO decide if user-defined conversion should be provided. 
-#if 0 - INLINE operator char() const { return (char)(Base::V); } - INLINE operator signed char() const { return (signed char)(Base::V); } - INLINE operator unsigned char() const { return (unsigned char)(Base::V); } - INLINE operator short() const { return (short)(Base::V); } - INLINE operator unsigned short() const { return (unsigned short)(Base::V); } - INLINE operator int() const { return (int)(Base::V); } - INLINE operator unsigned int () const { return (unsigned)(Base::V); } - INLINE operator long () const { return (long)(Base::V); } - INLINE operator unsigned long () const { return (unsigned long)(Base::V); } - INLINE operator ap_slong () { return (ap_slong)(Base::V); } - INLINE operator ap_ulong () { return (ap_ulong)(Base::V); } -#endif - - /* Helper methods. - ---------------------------------------------------------------- - */ - /* we cannot call a non-volatile function on a volatile instance. - * but calling a volatile function is ok. - * XXX deleted non-volatile version. - */ - INLINE int length() const volatile { return _AP_W; } - - /*Return true if the value of ap_int_base instance is zero*/ - INLINE bool iszero() const { return Base::V == 0; } - - /*Return true if the value of ap_int_base instance is zero*/ - INLINE bool is_zero() const { return Base::V == 0; } - - /* x < 0 */ - INLINE bool sign() const { - if (_AP_S && - _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)) - return true; - else - return false; - } - - /* x[i] = 0 */ - INLINE void clear(int i) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 0); - } - - /* x[i] = !x[i]*/ - INLINE void invert(int i) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - bool val = _AP_ROOT_op_get_bit(Base::V, i); - if (val) - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 0); - else - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 1); - } - - INLINE bool test(int i) const { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - return 
_AP_ROOT_op_get_bit(Base::V, i); - } - - // Get self. For ap_concat_ref expansion. - INLINE ap_int_base& get() { return *this; } - - // Set the ith bit into 1 - INLINE void set(int i) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 1); - } - - // Set the ith bit into v - INLINE void set(int i, bool v) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - Base::V = _AP_ROOT_op_set_bit(Base::V, i, v); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_int_base object n places to the left - INLINE ap_int_base& lrotate(int n) { - AP_ASSERT(n >= 0 && n < _AP_W, "shift value out of range"); - // TODO unify this. -#ifdef __SYNTHESIS__ - typeof(Base::V) l_p = Base::V << n; - typeof(Base::V) r_p = Base::V >> (_AP_W - n); - Base::V = l_p | r_p; -#else - Base::V.lrotate(n); -#endif - return *this; - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_int_base object n places to the right - INLINE ap_int_base& rrotate(int n) { - AP_ASSERT(n >= 0 && n < _AP_W, "shift value out of range"); - // TODO unify this. -#ifdef __SYNTHESIS__ - typeof(Base::V) l_p = Base::V << (_AP_W - n); - typeof(Base::V) r_p = Base::V >> n; - Base::V = l_p | r_p; -#else - Base::V.rrotate(n); -#endif - return *this; - } - - // Reverse the contents of ap_int_base instance. - // I.e. LSB becomes MSB and vise versa. 
- INLINE ap_int_base& reverse() { - Base::V = _AP_ROOT_op_get_range(Base::V, _AP_W - 1, 0); - return *this; - } - - // Set the ith bit into v - INLINE void set_bit(int i, bool v) { - Base::V = _AP_ROOT_op_set_bit(Base::V, i, v); - } - - // Get the value of ith bit - INLINE bool get_bit(int i) const { - return (bool)_AP_ROOT_op_get_bit(Base::V, i); - } - - // complements every bit - INLINE void b_not() { Base::V = ~Base::V; } - -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_int_base& operator Sym(const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - Base::V Sym op2.V; \ - return *this; \ - } - - /* Arithmetic assign. - * ---------------------------------------------------------------- - */ - OP_ASSIGN_AP(*=) - OP_ASSIGN_AP(+=) - OP_ASSIGN_AP(-=) - OP_ASSIGN_AP(/=) - OP_ASSIGN_AP(%=) -#undef OP_ASSIGN_AP - - /* Bitwise assign: and, or, xor. - * ---------------------------------------------------------------- - */ -#define OP_ASSIGN_AP_CHK(Sym) \ - template \ - INLINE ap_int_base& operator Sym(const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - _AP_WARNING((_AP_W != _AP_W2), \ - "Bitsize mismatch for ap_[u]int" #Sym "ap_[u]int."); \ - Base::V Sym op2.V; \ - return *this; \ - } - OP_ASSIGN_AP_CHK(&=) - OP_ASSIGN_AP_CHK(|=) - OP_ASSIGN_AP_CHK(^=) -#undef OP_ASSIGN_AP_CHK - - /* Prefix increment, decrement. 
- * ---------------------------------------------------------------- - */ - INLINE ap_int_base& operator++() { - operator+=((ap_int_base<1, false>)1); - return *this; - } - INLINE ap_int_base& operator--() { - operator-=((ap_int_base<1, false>)1); - return *this; - } - - /* Postfix increment, decrement - * ---------------------------------------------------------------- - */ - INLINE const typename RType<_AP_W,_AP_S>::arg1 operator++(int) { - ap_int_base t = *this; - operator+=((ap_int_base<1, false>)1); - return t; - } - INLINE const typename RType<_AP_W,_AP_S>::arg1 operator--(int) { - ap_int_base t = *this; - operator-=((ap_int_base<1, false>)1); - return t; - } - - /* Unary arithmetic. - * ---------------------------------------------------------------- - */ - INLINE typename RType<_AP_W,_AP_S>::arg1 operator+() const { return *this; } - - // TODO used to be W>64 only... need check. - INLINE typename RType<1, false>::minus operator-() const { - return ap_int_base<1, false>(0) - *this; - } - - /* Not (!) - * ---------------------------------------------------------------- - */ - INLINE bool operator!() const { return Base::V == 0; } - - /* Bitwise (arithmetic) unary: complement - ---------------------------------------------------------------- - */ - // XXX different from Mentor's ac_int! - INLINE typename RType<_AP_W,_AP_S>::arg1 operator~() const { - ap_int_base<_AP_W, _AP_S> r; - r.V = ~Base::V; - return r; - } - - /* Shift (result constrained by left operand). 
- * ---------------------------------------------------------------- - */ - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator<<(const ap_int_base<_AP_W2, true>& op2) const { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator>>(sh); - } else - return operator<<(sh); - } - - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator<<(const ap_int_base<_AP_W2, false>& op2) const { - ap_int_base r; - r.V = Base::V << op2.to_uint(); - return r; - } - - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator>>(const ap_int_base<_AP_W2, true>& op2) const { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator<<(sh); - } - return operator>>(sh); - } - - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator>>(const ap_int_base<_AP_W2, false>& op2) const { - ap_int_base r; - r.V = Base::V >> op2.to_uint(); - return r; - } - - // FIXME we standalone operator>> for ap_int_base and ap_range_ref. 
-#if 0 - template - INLINE ap_int_base operator<<(const ap_range_ref<_AP_W2, _AP_S2>& op2) const { - return *this << (op2.operator ap_int_base<_AP_W2, false>()); - } - - template - INLINE ap_int_base operator>>(const ap_range_ref<_AP_W2, _AP_S2>& op2) const { - return *this >> (op2.operator ap_int_base<_AP_W2, false>()); - } -#endif - - /* Shift assign - * ---------------------------------------------------------------- - */ - template - INLINE ap_int_base& operator<<=(const ap_int_base<_AP_W2, true>& op2) { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator>>=(sh); - } else - return operator<<=(sh); - } - - template - INLINE ap_int_base& operator<<=(const ap_int_base<_AP_W2, false>& op2) { - Base::V <<= op2.to_uint(); - return *this; - } - - template - INLINE ap_int_base& operator>>=(const ap_int_base<_AP_W2, true>& op2) { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator<<=(sh); - } - return operator>>=(sh); - } - - template - INLINE ap_int_base& operator>>=(const ap_int_base<_AP_W2, false>& op2) { - Base::V >>= op2.to_uint(); - return *this; - } - - // FIXME we standalone operator>> for ap_int_base and ap_range_ref. -#if 0 - template - INLINE ap_int_base& operator<<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return *this <<= (op2.operator ap_int_base<_AP_W2, false>()); - } - template - INLINE ap_int_base& operator>>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return *this >>= (op2.operator ap_int_base<_AP_W2, false>()); - } -#endif - - /* Equality and Relational. 
- * ---------------------------------------------------------------- - */ - template - INLINE bool operator==(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V == op2.V; - } - template - INLINE bool operator!=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return !(Base::V == op2.V); - } - template - INLINE bool operator<(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V < op2.V; - } - template - INLINE bool operator>=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V >= op2.V; - } - template - INLINE bool operator>(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V > op2.V; - } - template - INLINE bool operator<=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V <= op2.V; - } - - /* Bit and Part Select - * ---------------------------------------------------------------- - */ - INLINE ap_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { - _AP_ERROR(Hi >= _AP_W, "Hi(%d)out of bound(%d) in range()", Hi, _AP_W); - _AP_ERROR(Lo >= _AP_W, "Lo(%d)out of bound(%d) in range()", Lo, _AP_W); - return ap_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - // This is a must to strip constness to produce reference type. 
- INLINE ap_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { - _AP_ERROR(Hi >= _AP_W, "Hi(%d)out of bound(%d) in range()", Hi, _AP_W); - _AP_ERROR(Lo >= _AP_W, "Lo(%d)out of bound(%d) in range()", Lo, _AP_W); - return ap_range_ref<_AP_W, _AP_S>(const_cast(this), Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE ap_range_ref<_AP_W, _AP_S> range() { - return this->range(_AP_W - 1, 0); - } - - INLINE ap_range_ref<_AP_W, _AP_S> range() const { - return this->range(_AP_W - 1, 0); - } - - INLINE ap_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { - return this->range(Hi, Lo); - } - - INLINE ap_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { - return this->range(Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - -#if 0 - template - INLINE ap_int_base slice() const { - AP_ASSERT(Hi >= Lo && Hi < _AP_W && Lo < _AP_W, "Out of bounds in slice()"); - ap_int_base tmp ; - tmp.V = _AP_ROOT_op_get_range(Base::V, Lo, Hi); - return tmp; - } - - INLINE ap_bit_ref<_AP_W,_AP_S> operator [] ( unsigned int uindex) { - AP_ASSERT(uindex < _AP_W, "Attempting to read bit beyond MSB"); - 
ap_bit_ref<_AP_W,_AP_S> bvh( this, uindex ); - return bvh; - } -#endif - - INLINE ap_bit_ref<_AP_W, _AP_S> operator[](int index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index); - return bvh; - } - - template - INLINE ap_bit_ref<_AP_W, _AP_S> operator[]( - const ap_int_base<_AP_W2, _AP_S2>& index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index.to_int()); - return bvh; - } - - INLINE bool operator[](int index) const { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> br(this, index); - return br.to_bool(); - } - template - INLINE bool operator[](const ap_int_base<_AP_W2, _AP_S2>& index) const { - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> br(this, index.to_int()); - return br.to_bool(); - } - - INLINE ap_bit_ref<_AP_W, _AP_S> bit(int index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index); - return bvh; - } - template - INLINE ap_bit_ref<_AP_W, _AP_S> bit( - const ap_int_base<_AP_W2, _AP_S2>& index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index.to_int()); - return bvh; - } - - INLINE bool bit(int index) const { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> br(this, index); - return br.to_bool(); - } - - template - INLINE bool bit(const ap_int_base<_AP_W2, _AP_S2>& 
index) const { - return bit(index.to_int()); - } - -#if 0 - template - INLINE bool operator[](_AP_T index) const { - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W,_AP_S> br = operator[](index); - return br.to_bool(); - } -#endif - - // Count the number of zeros from the most significant bit - // to the first one bit. - INLINE int countLeadingZeros() { -#ifdef __SYNTHESIS__ - if (_AP_W <= 32) { - ap_int_base<32, false> t(-1UL), x; - x.V = _AP_ROOT_op_get_range(this->V, _AP_W - 1, 0); // reverse - t.V = _AP_ROOT_op_set_range(t.V, 0, _AP_W - 1, x.V); - return __builtin_ctz(t.V); // count trailing zeros. - } else if (_AP_W <= 64) { - ap_int_base<64, false> t(-1ULL); - ap_int_base<64, false> x; - x.V = _AP_ROOT_op_get_range(this->V, _AP_W - 1, 0); // reverse - t.V = _AP_ROOT_op_set_range(t.V, 0, _AP_W - 1, x.V); - return __builtin_ctzll(t.V); // count trailing zeros. - } else { - enum { __N = (_AP_W + 63) / 64 }; - int NZeros = 0; - int i = 0; - bool hitNonZero = false; - for (i = 0; i < __N - 1; ++i) { - ap_int_base<64, false> t; - t.V = _AP_ROOT_op_get_range(this->V, _AP_W - i * 64 - 64, _AP_W - i * 64 - 1); - NZeros += hitNonZero ? 0 : __builtin_clzll(t.V); // count leading zeros. 
- hitNonZero |= (t.V != 0); - } - if (!hitNonZero) { - ap_int_base<64, false> t(-1ULL); - enum { REST = (_AP_W - 1) % 64 }; - ap_int_base<64, false> x; - x.V = _AP_ROOT_op_get_range(this->V, 0, REST); - t.V = _AP_ROOT_op_set_range(t.V, 63 - REST, 63, x.V); - NZeros += __builtin_clzll(t.V); - } - return NZeros; - } -#else - return (Base::V).countLeadingZeros(); -#endif - } // countLeadingZeros - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - concat(const ap_int_base<_AP_W2, _AP_S2>& a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - concat(ap_int_base<_AP_W2, _AP_S2>& a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, a2); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(ap_range_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >(*this, a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - const_cast&>(*this), a2); - } - - template - INLINE 
ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(const ap_bit_ref<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(ap_bit_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, - a2); - } - - template - INLINE ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &a2) const { - return ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, 
_AP_O2, _AP_N2> >( - const_cast&>(*this), - const_cast< - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); - } - - template - INLINE ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, - a2); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &a2) const { - return ap_concat_ref< - _AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - const_cast&>(*this), - const_cast&>( - a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref< - _AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); - } - - template - INLINE ap_int_base operator&( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { - return *this & a2.get(); - } - - template - INLINE ap_int_base operator|( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { - return *this | a2.get(); - } - - template - INLINE ap_int_base operator^( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { - return *this ^ a2.get(); - } - - template - INLINE void set(const ap_int_base<_AP_W3, false>& val) { - Base::V = val.V; - } - - /* Reduce operations. - * ---------------------------------------------------------------- - */ - // XXX non-const version deleted. 
- INLINE bool and_reduce() const { return _AP_ROOT_op_reduce(and, Base::V); } - INLINE bool nand_reduce() const { return _AP_ROOT_op_reduce(nand, Base::V); } - INLINE bool or_reduce() const { return _AP_ROOT_op_reduce(or, Base::V); } - INLINE bool nor_reduce() const { return !(_AP_ROOT_op_reduce(or, Base::V)); } - INLINE bool xor_reduce() const { return _AP_ROOT_op_reduce (xor, Base::V); } - INLINE bool xnor_reduce() const { - return !(_AP_ROOT_op_reduce (xor, Base::V)); - } - - /* Output as a string. - * ---------------------------------------------------------------- - */ -#ifndef __SYNTHESIS__ - std::string to_string(signed char rd = 2, bool sign = _AP_S) const { - // XXX in autosim/autowrap.tcl "(${name}).to_string(2).c_str()" is used to - // initialize sc_lv, which seems incapable of handling format "-0b". - if (rd == 2) sign = false; - return (Base::V).to_string(rd, sign); - } -#else - INLINE char* to_string(signed char rd = 2, bool sign = _AP_S) const { - return 0; - } -#endif -}; // struct ap_int_base - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<(std::ostream& os, - const ap_int_base<_AP_W, _AP_S>& x) { - std::ios_base::fmtflags ff = std::cout.flags(); - if (ff & std::cout.hex) { - os << x.to_string(16); // don't print sign - } else if (ff & std::cout.oct) { - os << x.to_string(8); // don't print sign - } else { - os << x.to_string(10); - } - return os; -} -#endif // ifndef __SYNTHESIS__ - -#ifndef __SYNTHESIS__ -template -INLINE std::istream& operator>>(std::istream& in, - ap_int_base<_AP_W, _AP_S>& op) { - std::string str; - in >> str; - const std::ios_base::fmtflags basefield = in.flags() & std::ios_base::basefield; - unsigned radix = (basefield == std::ios_base::dec) ? 0 : ( - (basefield == std::ios_base::oct) ? 8 : ( - (basefield == std::ios_base::hex) ? 
16 : 0)); - op = ap_int_base<_AP_W, _AP_S>(str.c_str(), radix); - return in; -} -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_AUTOCC - -/* Operators with another ap_int_base. - * ---------------------------------------------------------------- - */ -#define OP_BIN_AP(Sym, Rty) \ - template \ - INLINE \ - typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, _AP_S2>::Rty \ - operator Sym(const ap_int_base<_AP_W, _AP_S>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base lhs(op); \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base rhs(op2); \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base ret; \ - ret.V = lhs.V Sym rhs.V; \ - return ret; \ - } - -OP_BIN_AP(*, mult) -OP_BIN_AP(+, plus) -OP_BIN_AP(-, minus) -OP_BIN_AP(&, logic) -OP_BIN_AP(|, logic) -OP_BIN_AP(^, logic) - -#define OP_BIN_AP2(Sym, Rty) \ - template \ - INLINE \ - typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, _AP_S2>::Rty \ - operator Sym(const ap_int_base<_AP_W, _AP_S>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base ret; \ - ret.V = op.V Sym op2.V; \ - return ret; \ - } - -OP_BIN_AP2(/, div) -OP_BIN_AP2(%, mod) - -// shift operators are defined inside class. -// compound assignment operators are defined inside class. - -/* Operators with a pointer type. - * ---------------------------------------------------------------- - * char a[100]; - * char* ptr = a; - * ap_int<2> n = 3; - * char* ptr2 = ptr + n*2; - * avoid ambiguous errors. 
- */ -#define OP_BIN_WITH_PTR(BIN_OP) \ - template \ - INLINE PTR_TYPE* operator BIN_OP(PTR_TYPE* i_op, \ - const ap_int_base<_AP_W, _AP_S>& op) { \ - ap_slong op2 = op.to_int64(); /* Not all implementation */ \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE PTR_TYPE* operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, \ - PTR_TYPE* i_op) { \ - ap_slong op2 = op.to_int64(); /* Not all implementation */ \ - return op2 BIN_OP i_op; \ - } - -OP_BIN_WITH_PTR(+) -OP_BIN_WITH_PTR(-) - -/* Operators with a native floating point types. - * ---------------------------------------------------------------- - */ -// float OP ap_int -// when ap_int's width > 64, then trunc ap_int to ap_int<64> -#define OP_BIN_WITH_FLOAT(BIN_OP, C_TYPE) \ - template \ - INLINE C_TYPE operator BIN_OP(C_TYPE i_op, \ - const ap_int_base<_AP_W, _AP_S>& op) { \ - typename ap_int_base<_AP_W, _AP_S>::RetType op2 = op; \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE C_TYPE operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, \ - C_TYPE i_op) { \ - typename ap_int_base<_AP_W, _AP_S>::RetType op2 = op; \ - return op2 BIN_OP i_op; \ - } - -#define ALL_OP_WITH_FLOAT(C_TYPE) \ - OP_BIN_WITH_FLOAT(*, C_TYPE) \ - OP_BIN_WITH_FLOAT(/, C_TYPE) \ - OP_BIN_WITH_FLOAT(+, C_TYPE) \ - OP_BIN_WITH_FLOAT(-, C_TYPE) - -#if _AP_ENABLE_HALF_ == 1 -ALL_OP_WITH_FLOAT(half) -#endif -ALL_OP_WITH_FLOAT(float) -ALL_OP_WITH_FLOAT(double) - -// TODO no shift? - -/* Operators with a native integral types. - * ---------------------------------------------------------------- - */ -// arithmetic and bitwise operators. 
-#define OP_BIN_WITH_INT(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(C_TYPE i_op, const ap_int_base<_AP_W, _AP_S>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(i_op) BIN_OP(op); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, C_TYPE i_op) { \ - return op BIN_OP ap_int_base<_AP_W2, _AP_S2>(i_op); \ - } - -#define ALL_OP_BIN_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_BIN_WITH_INT(*, C_TYPE, _AP_W2, _AP_S2, mult) \ - OP_BIN_WITH_INT(+, C_TYPE, _AP_W2, _AP_S2, plus) \ - OP_BIN_WITH_INT(-, C_TYPE, _AP_W2, _AP_S2, minus) \ - OP_BIN_WITH_INT(/, C_TYPE, _AP_W2, _AP_S2, div) \ - OP_BIN_WITH_INT(%, C_TYPE, _AP_W2, _AP_S2, mod) \ - OP_BIN_WITH_INT(&, C_TYPE, _AP_W2, _AP_S2, logic) \ - OP_BIN_WITH_INT(|, C_TYPE, _AP_W2, _AP_S2, logic) \ - OP_BIN_WITH_INT(^, C_TYPE, _AP_W2, _AP_S2, logic) - -ALL_OP_BIN_WITH_INT(bool, 1, false) -ALL_OP_BIN_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_BIN_WITH_INT(signed char, 8, true) -ALL_OP_BIN_WITH_INT(unsigned char, 8, false) -ALL_OP_BIN_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_BIN_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_BIN_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_BIN_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_BIN_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_BIN_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_BIN_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_OP_BIN_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef OP_BIN_WITH_INT -#undef ALL_OP_BIN_WITH_INT - -// shift operators. -#define ALL_OP_SHIFT_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator<<( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - if (_AP_S2) \ - r.V = op2 >= 0 ? 
(op.V << op2) : (op.V >> (-op2)); \ - else \ - r.V = op.V << op2; \ - return r; \ - } \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator>>( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - if (_AP_S2) \ - r.V = op2 >= 0 ? (op.V >> op2) : (op.V << (-op2)); \ - else \ - r.V = op.V >> op2; \ - return r; \ - } - -ALL_OP_SHIFT_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_SHIFT_WITH_INT(signed char, 8, true) -ALL_OP_SHIFT_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_SHIFT_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_SHIFT_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_SHIFT_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) - -#undef ALL_OP_SHIFT_WITH_INT - -#define ALL_OP_SHIFT_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator<<( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - r.V = op.V << op2; \ - return r; \ - } \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator>>( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - r.V = op.V >> op2; \ - return r; \ - } -ALL_OP_SHIFT_WITH_INT(bool, 1, false) -ALL_OP_SHIFT_WITH_INT(unsigned char, 8, false) -ALL_OP_SHIFT_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_SHIFT_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_SHIFT_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_SHIFT_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef ALL_OP_SHIFT_WITH_INT - -// compound assign operators. 
-#define OP_ASSIGN_WITH_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_int_base<_AP_W, _AP_S>& operator ASSIGN_OP( \ - ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - return op ASSIGN_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } - -// TODO int a; ap_int<16> b; a += b; - -#define ALL_OP_ASSIGN_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(+=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(-=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(*=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(/=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(%=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(&=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(|=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(^=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(>>=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(<<=, C_TYPE, _AP_W2, _AP_S2) - -ALL_OP_ASSIGN_WITH_INT(bool, 1, false) -ALL_OP_ASSIGN_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_ASSIGN_WITH_INT(signed char, 8, true) -ALL_OP_ASSIGN_WITH_INT(unsigned char, 8, false) -ALL_OP_ASSIGN_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_ASSIGN_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_ASSIGN_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_ASSIGN_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_ASSIGN_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_ASSIGN_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_ASSIGN_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_OP_ASSIGN_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef OP_ASSIGN_WITH_INT -#undef ALL_OP_ASSIGN_WITH_INT - -// equality and relational operators. 
-#define OP_REL_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(C_TYPE i_op, \ - const ap_int_base<_AP_W, _AP_S>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(i_op) REL_OP op; \ - } \ - template \ - INLINE bool operator REL_OP(const ap_int_base<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return op REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } - -#define ALL_OP_REL_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(>, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(<, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(>=, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(<=, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(==, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(!=, C_TYPE, _AP_W2, _AP_S2) - -ALL_OP_REL_WITH_INT(bool, 1, false) -ALL_OP_REL_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_REL_WITH_INT(signed char, 8, true) -ALL_OP_REL_WITH_INT(unsigned char, 8, false) -ALL_OP_REL_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_REL_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_REL_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_REL_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_REL_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_REL_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_REL_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_OP_REL_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef OP_REL_WITH_INT -#undef ALL_OP_BIN_WITH_INT - -#define OP_REL_WITH_DOUBLE_OR_FLOAT(Sym) \ - template \ - INLINE bool operator Sym(const ap_int_base<_AP_W, _AP_S>& op1, \ - double op2) { \ - return op1.to_double() Sym op2 ; \ - } \ - template \ - INLINE bool operator Sym(double op1, \ - const ap_int_base<_AP_W, _AP_S>& op2) { \ - return op1 Sym op2.to_double() ; \ - } \ - template \ - INLINE bool operator Sym(const ap_int_base<_AP_W, _AP_S>& op1, \ - float op2) { \ - return op1.to_double() Sym op2 ; \ - } \ - template \ - INLINE bool operator Sym(float op1, \ - const ap_int_base<_AP_W, _AP_S>& op2) { \ - return op1 Sym op2.to_double() ; \ - } - 
OP_REL_WITH_DOUBLE_OR_FLOAT(>) - OP_REL_WITH_DOUBLE_OR_FLOAT(<) - OP_REL_WITH_DOUBLE_OR_FLOAT(>=) - OP_REL_WITH_DOUBLE_OR_FLOAT(<=) - OP_REL_WITH_DOUBLE_OR_FLOAT(==) - OP_REL_WITH_DOUBLE_OR_FLOAT(!=) - -#undef OP_REL_WITH_DOUBLE_OR_FLOAT - - -/* Operators with ap_bit_ref. - * ------------------------------------------------------------ - */ -// arithmetic, bitwise and shift operators. -#define OP_BIN_WITH_RANGE(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_range_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<_AP_W1, false>(op1) BIN_OP op2; \ - } \ - template \ - INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 BIN_OP ap_int_base<_AP_W2, false>(op2); \ - } - -OP_BIN_WITH_RANGE(+, plus) -OP_BIN_WITH_RANGE(-, minus) -OP_BIN_WITH_RANGE(*, mult) -OP_BIN_WITH_RANGE(/, div) -OP_BIN_WITH_RANGE(%, mod) -OP_BIN_WITH_RANGE(&, logic) -OP_BIN_WITH_RANGE(|, logic) -OP_BIN_WITH_RANGE(^, logic) -OP_BIN_WITH_RANGE(>>, arg1) -OP_BIN_WITH_RANGE(<<, arg1) - -#undef OP_BIN_WITH_RANGE - -// compound assignment operators. 
-#define OP_ASSIGN_WITH_RANGE(ASSIGN_OP) \ - template \ - INLINE ap_int_base<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_int_base<_AP_W1, _AP_S1>& op1, ap_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 ASSIGN_OP ap_int_base<_AP_W2, false>(op2); \ - } \ - template \ - INLINE ap_range_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_range_ref<_AP_W1, _AP_S1>& op1, ap_int_base<_AP_W2, _AP_S2>& op2) { \ - ap_int_base<_AP_W1, false> tmp(op1); \ - tmp ASSIGN_OP op2; \ - op1 = tmp; \ - return op1; \ - } - -OP_ASSIGN_WITH_RANGE(+=) -OP_ASSIGN_WITH_RANGE(-=) -OP_ASSIGN_WITH_RANGE(*=) -OP_ASSIGN_WITH_RANGE(/=) -OP_ASSIGN_WITH_RANGE(%=) -OP_ASSIGN_WITH_RANGE(&=) -OP_ASSIGN_WITH_RANGE(|=) -OP_ASSIGN_WITH_RANGE(^=) -OP_ASSIGN_WITH_RANGE(>>=) -OP_ASSIGN_WITH_RANGE(<<=) - -#undef OP_ASSIGN_WITH_RANGE - -// equality and relational operators -#define OP_REL_WITH_RANGE(REL_OP) \ - template \ - INLINE bool operator REL_OP(const ap_range_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<_AP_W1, false>(op1).operator REL_OP(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator REL_OP(op2.operator ap_int_base<_AP_W2, false>()); \ - } - -OP_REL_WITH_RANGE(==) -OP_REL_WITH_RANGE(!=) -OP_REL_WITH_RANGE(>) -OP_REL_WITH_RANGE(>=) -OP_REL_WITH_RANGE(<) -OP_REL_WITH_RANGE(<=) - -#undef OP_REL_WITH_RANGE - -/* Operators with ap_bit_ref. - * ------------------------------------------------------------ - */ -// arithmetic, bitwise and shift operators. 
-#define OP_BIN_WITH_BIT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<1, false>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 BIN_OP ap_int_base<1, false>(op2); \ - } \ - template \ - INLINE typename ap_int_base<1, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP(const ap_bit_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<1, false>(op1) BIN_OP op2; \ - } - -OP_BIN_WITH_BIT(+, plus) -OP_BIN_WITH_BIT(-, minus) -OP_BIN_WITH_BIT(*, mult) -OP_BIN_WITH_BIT(/, div) -OP_BIN_WITH_BIT(%, mod) -OP_BIN_WITH_BIT(&, logic) -OP_BIN_WITH_BIT(|, logic) -OP_BIN_WITH_BIT(^, logic) -OP_BIN_WITH_BIT(>>, arg1) -OP_BIN_WITH_BIT(<<, arg1) - -#undef OP_BIN_WITH_BIT - -// compound assignment operators. -#define OP_ASSIGN_WITH_BIT(ASSIGN_OP) \ - template \ - INLINE ap_int_base<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_int_base<_AP_W1, _AP_S1>& op1, ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 ASSIGN_OP ap_int_base<1, false>(op2); \ - } \ - template \ - INLINE ap_bit_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_bit_ref<_AP_W1, _AP_S1>& op1, ap_int_base<_AP_W2, _AP_S2>& op2) { \ - ap_int_base<1, false> tmp(op1); \ - tmp ASSIGN_OP op2; \ - op1 = tmp; \ - return op1; \ - } - -OP_ASSIGN_WITH_BIT(+=) -OP_ASSIGN_WITH_BIT(-=) -OP_ASSIGN_WITH_BIT(*=) -OP_ASSIGN_WITH_BIT(/=) -OP_ASSIGN_WITH_BIT(%=) -OP_ASSIGN_WITH_BIT(&=) -OP_ASSIGN_WITH_BIT(|=) -OP_ASSIGN_WITH_BIT(^=) -OP_ASSIGN_WITH_BIT(>>=) -OP_ASSIGN_WITH_BIT(<<=) - -#undef OP_ASSIGN_WITH_BIT - -// equality and relational operators. 
-#define OP_REL_WITH_BIT(REL_OP) \ - template \ - INLINE bool operator REL_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 REL_OP ap_int_base<1, false>(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_bit_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<1, false>(op1) REL_OP op2; \ - } - -OP_REL_WITH_BIT(==) -OP_REL_WITH_BIT(!=) -OP_REL_WITH_BIT(>) -OP_REL_WITH_BIT(>=) -OP_REL_WITH_BIT(<) -OP_REL_WITH_BIT(<=) - -#undef OP_REL_WITH_BIT - - -/* Operators with ap_concat_ref. - * ------------------------------------------------------------ - */ -// arithmetic, bitwise and shift operators. -// bitwise operators are defined in struct. -// TODO specify whether to define arithmetic and bitwise operators. -#if 0 -#define OP_BIN_WITH_CONCAT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W3, _AP_S3>::template RType<_AP_W1 + _AP_W2, \ - false>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W3, _AP_S3>& op1, \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1 BIN_OP op2.get(); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W1 + _AP_W2, \ - false>::template RType<_AP_W3, _AP_S3>::RTYPE \ - operator BIN_OP(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ - const ap_int_base<_AP_W3, _AP_S3>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1.get() BIN_OP op2; \ - } - -OP_BIN_WITH_CONCAT(+, plus) -OP_BIN_WITH_CONCAT(-, minus) -OP_BIN_WITH_CONCAT(*, mult) -OP_BIN_WITH_CONCAT(/, div) -OP_BIN_WITH_CONCAT(%, mod) -OP_BIN_WITH_CONCAT(&, logic) -OP_BIN_WITH_CONCAT(|, logic) -OP_BIN_WITH_CONCAT(^, logic) -OP_BIN_WITH_CONCAT(>>, arg1) -OP_BIN_WITH_CONCAT(<<, arg1) - -#undef OP_BIN_WITH_CONCAT - -// compound assignment operators. 
-#define OP_ASSIGN_WITH_CONCAT(ASSIGN_OP) \ - template \ - INLINE typename ap_int_base<_AP_W3, _AP_S3>::template RType<_AP_W1 + _AP_W2, \ - false>::RTYPE \ - operator ASSIGN_OP( \ - const ap_int_base<_AP_W3, _AP_S3>& op1, \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1 ASSIGN_OP op2.get(); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W1 + _AP_W2, \ - false>::template RType<_AP_W3, _AP_S3>::RTYPE \ - operator ASSIGN_OP(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ - const ap_int_base<_AP_W3, _AP_S3>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - ap_int_base<_AP_W1 + _AP_W2, false> tmp = op1.get(); \ - tmp ASSIGN_OP op2; \ - op1 = tmp; \ - return op1; \ - } - -OP_ASSIGN_WITH_CONCAT(+=) -OP_ASSIGN_WITH_CONCAT(-=) -OP_ASSIGN_WITH_CONCAT(*=) -OP_ASSIGN_WITH_CONCAT(/=) -OP_ASSIGN_WITH_CONCAT(%=) -OP_ASSIGN_WITH_CONCAT(&=) -OP_ASSIGN_WITH_CONCAT(|=) -OP_ASSIGN_WITH_CONCAT(^=) -OP_ASSIGN_WITH_CONCAT(>>=) -OP_ASSIGN_WITH_CONCAT(<<=) - -#undef OP_ASSIGN_WITH_CONCAT -#endif - -// equality and relational operators. 
-#define OP_REL_WITH_CONCAT(REL_OP) \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W3, _AP_S3>& op1, \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1 REL_OP op2.get(); \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ - const ap_int_base<_AP_W3, _AP_S3>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1.get() REL_OP op2; \ - } - -OP_REL_WITH_CONCAT(==) -OP_REL_WITH_CONCAT(!=) -OP_REL_WITH_CONCAT(>) -OP_REL_WITH_CONCAT(>=) -OP_REL_WITH_CONCAT(<) -OP_REL_WITH_CONCAT(<=) - -#undef OP_REL_WITH_CONCAT - -#endif // ifndef __cplusplus -#endif // ifndef __AP_INT_BASE_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int_ref.h b/TrigScint/include/TrigScint/ap_int_ref.h deleted file mode 100644 index 421f09fda..000000000 --- a/TrigScint/include/TrigScint/ap_int_ref.h +++ /dev/null @@ -1,1346 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_REF_H__ -#define __AP_INT_REF_H__ - -#ifndef __AP_INT_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" - -#else - -#ifndef __SYNTHESIS__ -#include -#endif - -/* Concatination reference. 
- ---------------------------------------------------------------- -*/ -template -struct ap_concat_ref { - enum { - _AP_WR = _AP_W1 + _AP_W2, - }; - - _AP_T1& mbv1; - _AP_T2& mbv2; - - INLINE ap_concat_ref(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& ref) - : mbv1(ref.mbv1), mbv2(ref.mbv2) {} - - INLINE ap_concat_ref(_AP_T1& bv1, _AP_T2& bv2) : mbv1(bv1), mbv2(bv2) {} - - template - INLINE ap_concat_ref& operator=(const ap_int_base<_AP_W3, _AP_S3>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> vval(val); - int W_ref1 = mbv1.length(); - int W_ref2 = mbv2.length(); - ap_int_base<_AP_W1, false> Part1; - Part1.V = _AP_ROOT_op_get_range(vval.V, W_ref2, W_ref1 + W_ref2 - 1); - mbv1.set(Part1); - ap_int_base<_AP_W2, false> Part2; - Part2.V = _AP_ROOT_op_get_range(vval.V, 0, W_ref2 - 1); - mbv2.set(Part2); - return *this; - } - - // assign op from hls supported C integral types. - // FIXME disabled to support legacy code directly assign from sc_signal - //template - //INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, - // ap_concat_ref&>::type - //operator=(T val) { - // ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - // return operator=(tmpVal); - //} -#define ASSIGN_WITH_CTYPE(_Tp) \ - INLINE ap_concat_ref& operator=(_Tp val) { \ - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); \ - return operator=(tmpVal); \ - } - - ASSIGN_WITH_CTYPE(bool) - ASSIGN_WITH_CTYPE(char) - ASSIGN_WITH_CTYPE(signed char) - ASSIGN_WITH_CTYPE(unsigned char) - ASSIGN_WITH_CTYPE(short) - ASSIGN_WITH_CTYPE(unsigned short) - ASSIGN_WITH_CTYPE(int) - ASSIGN_WITH_CTYPE(unsigned int) - ASSIGN_WITH_CTYPE(long) - ASSIGN_WITH_CTYPE(unsigned long) - ASSIGN_WITH_CTYPE(ap_slong) - ASSIGN_WITH_CTYPE(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_WITH_CTYPE(half) -#endif - ASSIGN_WITH_CTYPE(float) - ASSIGN_WITH_CTYPE(double) - -#undef ASSIGN_WITH_CTYPE - - // Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. 
- INLINE ap_concat_ref& operator=( - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - - template - INLINE ap_concat_ref& operator=( - const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - - template - INLINE ap_concat_ref& operator=(const ap_bit_ref<_AP_W3, _AP_S3>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - template - INLINE ap_concat_ref& operator=(const ap_range_ref<_AP_W3, _AP_S3>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - - template - INLINE ap_concat_ref& operator=( - const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { - return operator=((const ap_int_base<_AP_W3, false>)(val)); - } - - template - INLINE ap_concat_ref& operator=( - const ap_fixed_base<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& - val) { - return operator=(val.to_ap_int_base()); - } - - template - INLINE ap_concat_ref& operator=( - const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { - return operator=((ap_ulong)(bool)(val)); - } - - INLINE operator ap_int_base<_AP_WR, false>() const { return get(); } - - INLINE operator ap_ulong() const { return get().to_uint64(); } - - template - INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_range_ref<_AP_W3, _AP_S3> > - operator,(const ap_range_ref<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_range_ref<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(ap_int_base<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >(*this, a2); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, 
_AP_S3> > - operator,(volatile ap_int_base<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(const ap_int_base<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(const volatile ap_int_base<_AP_W3, _AP_S3> &a2) { - // FIXME op's life does not seem long enough - ap_int_base<_AP_W3, _AP_S3> op(a2); - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >( - *this, const_cast&>(op)); - } - - template - INLINE ap_concat_ref<_AP_WR, ap_concat_ref, 1, ap_bit_ref<_AP_W3, _AP_S3> > - operator,(const ap_bit_ref<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, 1, ap_bit_ref<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, - ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> > - operator,(const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, - ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref< - _AP_WR, ap_concat_ref, _AP_W3, - af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > - operator,( - const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> &a2) { - return ap_concat_ref< - _AP_WR, ap_concat_ref, _AP_W3, - af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( - *this, - const_cast< - af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, 1, - af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > - 
operator,(const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> - &a2) { - return ap_concat_ref< - _AP_WR, ap_concat_ref, 1, - af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( - *this, - const_cast&>( - a2)); - } - - template - INLINE ap_int_base operator&( - const ap_int_base<_AP_W3, _AP_S3>& a2) { - return get() & a2; - } - - template - INLINE ap_int_base operator|( - const ap_int_base<_AP_W3, _AP_S3>& a2) { - return get() | a2; - } - - template - INLINE ap_int_base operator^( - const ap_int_base<_AP_W3, _AP_S3>& a2) { - return get() ^ a2; - } - -#if 0 - template - INLINE ap_int_base slice() { - ap_int_base<_AP_WR, false> bv = get(); - return bv.slice(); - } -#endif - - INLINE ap_int_base<_AP_WR, false> get() const { - ap_int_base<_AP_WR, false> tmpVal(0); - int W_ref1 = mbv1.length(); - int W_ref2 = mbv2.length(); - ap_int_base<_AP_W2, false> v2(mbv2); - ap_int_base<_AP_W1, false> v1(mbv1); - tmpVal.V = _AP_ROOT_op_set_range(tmpVal.V, 0, W_ref2 - 1, v2.V); - tmpVal.V = - _AP_ROOT_op_set_range(tmpVal.V, W_ref2, W_ref1 + W_ref2 - 1, v1.V); - return tmpVal; - } - - template - INLINE void set(const ap_int_base<_AP_W3, false>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> vval(val); - int W_ref1 = mbv1.length(); - int W_ref2 = mbv2.length(); - ap_int_base<_AP_W1, false> tmpVal1; - tmpVal1.V = _AP_ROOT_op_get_range(vval.V, W_ref2, W_ref1 + W_ref2 - 1); - mbv1.set(tmpVal1); - ap_int_base<_AP_W2, false> tmpVal2; - tmpVal2.V = _AP_ROOT_op_get_range(vval.V, 0, W_ref2 - 1); - mbv2.set(tmpVal2); - } - - INLINE int length() const { return mbv1.length() + mbv2.length(); } -}; // struct ap_concat_ref - -/* Range (slice) reference. - ---------------------------------------------------------------- -*/ -template -struct ap_range_ref { - // struct ssdm_int or its sim model. - // TODO make it possible to reference to ap_fixed_base/ap_fixed/ap_ufixed - // and then we can retire af_range_ref. 
- typedef ap_int_base<_AP_W, _AP_S> ref_type; - ref_type& d_bv; - int l_index; - int h_index; - - public: - INLINE ap_range_ref(const ap_range_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} - - INLINE ap_range_ref(ref_type* bv, int h, int l) - : d_bv(*bv), l_index(l), h_index(h) {} - - INLINE ap_range_ref(const ref_type* bv, int h, int l) - : d_bv(*const_cast(bv)), l_index(l), h_index(h) {} - - INLINE operator ap_int_base<_AP_W, false>() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - INLINE operator ap_ulong() const { return to_uint64(); } - - /// @name assign operators - // @{ - - // FIXME disabled to work-around lagacy code assigning from sc_signal, - // which dependes on implicit type conversion. - // - // /// assign from hls supported C integral types. - // template - // INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, - // ap_range_ref&>::type - // operator=(T val) { - // ap_int_base<_AP_W, false> tmp(val); - // d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - // return *this; - // } -#define ASSIGN_WITH_CTYPE(_Tp) \ - INLINE ap_range_ref& operator=(_Tp val) { \ - ap_int_base<_AP_W, false> tmp(val); \ - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); \ - return *this; \ - } - - ASSIGN_WITH_CTYPE(bool) - ASSIGN_WITH_CTYPE(char) - ASSIGN_WITH_CTYPE(signed char) - ASSIGN_WITH_CTYPE(unsigned char) - ASSIGN_WITH_CTYPE(short) - ASSIGN_WITH_CTYPE(unsigned short) - ASSIGN_WITH_CTYPE(int) - ASSIGN_WITH_CTYPE(unsigned int) - ASSIGN_WITH_CTYPE(long) - ASSIGN_WITH_CTYPE(unsigned long) - ASSIGN_WITH_CTYPE(ap_slong) - ASSIGN_WITH_CTYPE(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_WITH_CTYPE(half) -#endif - ASSIGN_WITH_CTYPE(float) - ASSIGN_WITH_CTYPE(double) - -#undef ASSIGN_WITH_CTYPE - - /// assign using string. XXX crucial for cosim. 
- INLINE ap_range_ref& operator=(const char* val) { - const ap_int_base<_AP_W, false> tmp(val); // XXX figure out radix - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - return *this; - } - - /// assign from ap_int_base. - template - INLINE ap_range_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { - ap_int_base<_AP_W, false> tmp(val); - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - return *this; - } - - /// copy assign operator - // XXX Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. - INLINE ap_range_ref& operator=(const ap_range_ref& val) { - return operator=((const ap_int_base<_AP_W, false>)val); - } - - /// assign from range reference to ap_int_base. - template - INLINE ap_range_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { - return operator=((const ap_int_base<_AP_W2, false>)val); - } - - /// assign from bit reference to ap_int_base. - template - INLINE ap_range_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=((ap_ulong)(bool)(val)); - } - - /// assign from ap_fixed_base. - template - INLINE ap_range_ref& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - val) { - return operator=(val.to_ap_int_base()); - } - - /// assign from range reference to ap_fixed_base. - template - INLINE ap_range_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((const ap_int_base<_AP_W2, false>)val); - } - - /// assign from bit reference to ap_fixed_base. - template - INLINE ap_range_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((ap_ulong)(bool)(val)); - } - - /// assign from compound reference. 
- template - INLINE ap_range_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - return operator=((const ap_int_base<_AP_W2 + _AP_W3, false>)(val)); - } - // @} - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, a2); - } - - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W, ap_int_base<_AP_W, _AP_S> > - operator,(ap_int_base<_AP_W, _AP_S>& a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W, - ap_int_base<_AP_W, _AP_S> >(*this, a2); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(const ap_bit_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - 
template - INLINE ap_concat_ref<_AP_W, ap_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref< - _AP_W, ap_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> a2) { - return ap_concat_ref< - _AP_W, ap_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast< - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &a2) { - return ap_concat_ref< - _AP_W, ap_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - a2)); - } - - template - INLINE bool operator==(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> hop(op2); - return lop == hop; - } - - template - INLINE bool operator!=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator==(op2)); - } - - template - INLINE bool operator<(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> hop(op2); - return lop < hop; - } - - template - INLINE bool operator<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> hop(op2); - return lop <= hop; - } - - template - INLINE bool operator>(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator<=(op2)); - } - - template - INLINE bool operator>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - 
return !(operator<(op2)); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator|=( - const ap_range_ref<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V |= (op2.d_bv).V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator|=( - const ap_int_base<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V |= op2.V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator&=( - const ap_range_ref<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V &= (op2.d_bv).V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator&=( - const ap_int_base<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V &= op2.V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator^=( - const ap_range_ref<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V ^= (op2.d_bv).V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator^=( - const ap_int_base<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V ^= op2.V; - return *this; - }; - - INLINE ap_int_base<_AP_W, false> get() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - template - INLINE void set(const ap_int_base<_AP_W2, false>& val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - } - - INLINE int length() const { - return h_index >= l_index ? 
h_index - l_index + 1 : l_index - h_index + 1; - } - - INLINE int to_int() const { - return (int)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned to_uint() const { - return (unsigned)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE long to_long() const { - return (long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned long to_ulong() const { - return (unsigned long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_slong to_int64() const { - return (ap_slong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_ulong to_uint64() const { - return (ap_ulong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE bool and_reduce() const { - bool ret = true; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) { -#ifdef __SYNTHESIS__ -#pragma HLS unroll -#endif - ret &= _AP_ROOT_op_get_bit(d_bv.V, i); - } - return ret; - } - - INLINE bool or_reduce() const { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) { -#ifdef __SYNTHESIS__ -#pragma HLS unroll -#endif - ret |= _AP_ROOT_op_get_bit(d_bv.V, i); - } - return ret; - } - - INLINE bool xor_reduce() const { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? 
l_index : h_index; - for (unsigned i = low; i != high; ++i) { -#ifdef __SYNTHESIS__ -#pragma HLS unroll -#endif - ret ^= _AP_ROOT_op_get_bit(d_bv.V, i); - } - return ret; - } -#ifndef __SYNTHESIS__ - std::string to_string(signed char radix = 2) const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret.to_string(radix); - } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string(signed char radix = 2) const { - return 0; - } -#endif -}; // struct ap_range_ref - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<(std::ostream& os, - const ap_range_ref<_AP_W, _AP_S>& x) { - std::ios_base::fmtflags ff = std::cout.flags(); - if (ff & std::cout.hex) { - os << x.to_string(16); // don't print sign - } else if (ff & std::cout.oct) { - os << x.to_string(8); // don't print sign - } else { - os << x.to_string(10); - } - return os; -} -#endif // ifndef __SYNTHESIS__ - -#ifndef __SYNTHESIS__ -template -INLINE std::istream& operator>>(std::istream& in, - ap_range_ref<_AP_W, _AP_S>& op) { - std::string str; - in >> str; - op = ap_int_base<_AP_W, _AP_S>(str.c_str()); - return in; -} -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_AUTOCC - -/* Bit reference. - ---------------------------------------------------------------- -*/ -template -struct ap_bit_ref { - // struct ssdm_int or its sim model. - // TODO make it possible to reference to ap_fixed_base/ap_fixed/ap_ufixed - // and then we can retire af_bit_ref. 
- typedef ap_int_base<_AP_W, _AP_S> ref_type; - ref_type& d_bv; - int d_index; - - public: - // copy ctor - INLINE ap_bit_ref(const ap_bit_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), d_index(ref.d_index) {} - - INLINE ap_bit_ref(ref_type* bv, int index = 0) : d_bv(*bv), d_index(index) {} - - INLINE ap_bit_ref(const ref_type* bv, int index = 0) - : d_bv(*const_cast(bv)), d_index(index) {} - - INLINE operator bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - INLINE bool to_bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - // assign op from hls supported C integral types. - // FIXME disabled to support sc_signal. - // NOTE this used to be unsigned long long. - //template - //INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, - // ap_bit_ref&>::type - //operator=(T val) { - // d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); - // return *this; - //} -#define ASSIGN_WITH_CTYPE(_Tp) \ - INLINE ap_bit_ref& operator=(_Tp val) { \ - d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); \ - return *this; \ - } - - ASSIGN_WITH_CTYPE(bool) - ASSIGN_WITH_CTYPE(char) - ASSIGN_WITH_CTYPE(signed char) - ASSIGN_WITH_CTYPE(unsigned char) - ASSIGN_WITH_CTYPE(short) - ASSIGN_WITH_CTYPE(unsigned short) - ASSIGN_WITH_CTYPE(int) - ASSIGN_WITH_CTYPE(unsigned int) - ASSIGN_WITH_CTYPE(long) - ASSIGN_WITH_CTYPE(unsigned long) - ASSIGN_WITH_CTYPE(ap_slong) - ASSIGN_WITH_CTYPE(ap_ulong) - -#undef ASSIGN_WITH_CTYPE - -#define ASSIGN_WITH_CTYPE_FP(_Tp) \ - INLINE ap_bit_ref& operator=(_Tp val) { \ - bool tmp_val = val; \ - d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index,tmp_val); \ - return *this; \ - } - -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_WITH_CTYPE_FP(half) -#endif - ASSIGN_WITH_CTYPE_FP(float) - ASSIGN_WITH_CTYPE_FP(double) - -#undef ASSIGN_WITH_CTYPE_FP - - - template - INLINE ap_bit_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { - return operator=((ap_ulong)(val.V != 0)); - } - - template - INLINE ap_bit_ref& operator=(const 
ap_range_ref<_AP_W2, _AP_S2>& val) { - return operator=((ap_int_base<_AP_W2, false>)val); - } - - // Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. - INLINE ap_bit_ref& operator=(const ap_bit_ref& val) { - return operator=((ap_ulong)(bool)val); - } - - template - INLINE ap_bit_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=((ap_ulong)(bool)val); - } - - template - INLINE ap_bit_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((const ap_int_base<_AP_W2, false>)val); - } - - template - INLINE ap_bit_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((ap_ulong)(bool)val); - } - - template - INLINE ap_bit_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - return operator=((const ap_int_base<_AP_W2 + _AP_W3, false>)val); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, a2); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { - ap_int_base<_AP_W2, _AP_S2> op(a2); - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - ap_int_base<_AP_W2, _AP_S2> op(a2); - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, 
const_cast&>(op)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > operator,( - const ap_bit_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref< - 1, ap_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref< - 1, ap_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast< - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref<1, ap_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - a2)); - } - - template - INLINE bool operator==(const ap_bit_ref<_AP_W2, _AP_S2>& op) { - return get() == op.get(); - } - - template - INLINE bool operator!=(const ap_bit_ref<_AP_W2, _AP_S2>& op) { - return get() != op.get(); - } - - INLINE bool get() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - INLINE bool get() { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - template 
- INLINE void set(const ap_int_base<_AP_W3, false>& val) { - operator=(val); - } - - INLINE bool operator~() const { - bool bit = _AP_ROOT_op_get_bit(d_bv.V, d_index); - return bit ? false : true; - } - - INLINE int length() const { return 1; } - -#ifndef __SYNTHESIS__ - std::string to_string() const { return get() ? "1" : "0"; } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string() const { return 0; } -#endif -}; // struct ap_bit_ref - -/* ap_range_ref with int. - * ------------------------------------------------------------ - */ -// equality and relational operators. -#define REF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(const ap_range_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return ap_int_base<_AP_W, false>(op) \ - REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_bit_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return bool(op) REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP(C_TYPE op2, \ - const ap_bit_ref<_AP_W, _AP_S>& op) { \ - return op2 REL_OP bool(op); \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_concat_ref<_AP_W, _AP_T, _AP_W1, _AP_T1>& op, C_TYPE op2) { \ - return ap_int_base<_AP_W + _AP_W1, false>(op) \ - REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } - -// Make the line shorter than 5000 chars -#define REF_REL_WITH_INT_1(C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(>, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(<, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(>=, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(<=, C_TYPE, _AP_WI, _AP_SI) - -REF_REL_WITH_INT_1(bool, 1, false) -REF_REL_WITH_INT_1(char, 8, CHAR_IS_SIGNED) -REF_REL_WITH_INT_1(signed char, 8, true) -REF_REL_WITH_INT_1(unsigned char, 8, false) -REF_REL_WITH_INT_1(short, _AP_SIZE_short, true) -REF_REL_WITH_INT_1(unsigned short, _AP_SIZE_short, false) -REF_REL_WITH_INT_1(int, _AP_SIZE_int, true) 
-REF_REL_WITH_INT_1(unsigned int, _AP_SIZE_int, false) -REF_REL_WITH_INT_1(long, _AP_SIZE_long, true) -REF_REL_WITH_INT_1(unsigned long, _AP_SIZE_long, false) -REF_REL_WITH_INT_1(ap_slong, _AP_SIZE_ap_slong, true) -REF_REL_WITH_INT_1(ap_ulong, _AP_SIZE_ap_slong, false) - -// Make the line shorter than 5000 chars -#define REF_REL_WITH_INT_2(C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(==, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(!=, C_TYPE, _AP_WI, _AP_SI) - -REF_REL_WITH_INT_2(bool, 1, false) -REF_REL_WITH_INT_2(char, 8, CHAR_IS_SIGNED) -REF_REL_WITH_INT_2(signed char, 8, true) -REF_REL_WITH_INT_2(unsigned char, 8, false) -REF_REL_WITH_INT_2(short, _AP_SIZE_short, true) -REF_REL_WITH_INT_2(unsigned short, _AP_SIZE_short, false) -REF_REL_WITH_INT_2(int, _AP_SIZE_int, true) -REF_REL_WITH_INT_2(unsigned int, _AP_SIZE_int, false) -REF_REL_WITH_INT_2(long, _AP_SIZE_long, true) -REF_REL_WITH_INT_2(unsigned long, _AP_SIZE_long, false) -REF_REL_WITH_INT_2(ap_slong, _AP_SIZE_ap_slong, true) -REF_REL_WITH_INT_2(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef REF_REL_OP_WITH_INT -#undef REF_REL_WITH_INT_1 -#undef REF_REL_WITH_INT_2 - -#define REF_BIN_OP_WITH_INT(BIN_OP, RTYPE, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE typename ap_int_base<_AP_W, false>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_range_ref<_AP_W, _AP_S>& op, C_TYPE op2) { \ - return ap_int_base<_AP_W, false>(op) \ - BIN_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W2, _AP_S2>::template RType<_AP_W, \ - false>::RTYPE \ - operator BIN_OP(C_TYPE op2, const ap_range_ref<_AP_W, _AP_S>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(op2) \ - BIN_OP ap_int_base<_AP_W, false>(op); \ - } - -// arithmetic operators. 
-#define REF_BIN_OP_WITH_INT_ARITH(C_TYPE, _AP_W2, _AP_S2) \ - REF_BIN_OP_WITH_INT(+, plus, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(-, minus, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(*, mult, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(/, div, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(%, mod, C_TYPE, (_AP_W2), (_AP_S2)) - -REF_BIN_OP_WITH_INT_ARITH(bool, 1, false) -REF_BIN_OP_WITH_INT_ARITH(char, 8, CHAR_IS_SIGNED) -REF_BIN_OP_WITH_INT_ARITH(signed char, 8, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned char, 8, false) -REF_BIN_OP_WITH_INT_ARITH(short, _AP_SIZE_short, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned short, _AP_SIZE_short, false) -REF_BIN_OP_WITH_INT_ARITH(int, _AP_SIZE_int, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned int, _AP_SIZE_int, false) -REF_BIN_OP_WITH_INT_ARITH(long, _AP_SIZE_long, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned long, _AP_SIZE_long, false) -REF_BIN_OP_WITH_INT_ARITH(ap_slong, _AP_SIZE_ap_slong, true) -REF_BIN_OP_WITH_INT_ARITH(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef REF_BIN_OP_WITH_INT_ARITH - -// bitwise and shift operators -#define REF_BIN_OP_WITH_INT_BITS(C_TYPE, _AP_W2, _AP_S2) \ - REF_BIN_OP_WITH_INT(&, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(|, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(^, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(>>, arg1, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(<<, arg1, C_TYPE, (_AP_W2), (_AP_S2)) - -REF_BIN_OP_WITH_INT_BITS(bool, 1, false) -REF_BIN_OP_WITH_INT_BITS(char, 8, CHAR_IS_SIGNED) -REF_BIN_OP_WITH_INT_BITS(signed char, 8, true) -REF_BIN_OP_WITH_INT_BITS(unsigned char, 8, false) -REF_BIN_OP_WITH_INT_BITS(short, _AP_SIZE_short, true) -REF_BIN_OP_WITH_INT_BITS(unsigned short, _AP_SIZE_short, false) -REF_BIN_OP_WITH_INT_BITS(int, _AP_SIZE_int, true) -REF_BIN_OP_WITH_INT_BITS(unsigned int, _AP_SIZE_int, false) -REF_BIN_OP_WITH_INT_BITS(long, _AP_SIZE_long, true) -REF_BIN_OP_WITH_INT_BITS(unsigned long, 
_AP_SIZE_long, false) -REF_BIN_OP_WITH_INT_BITS(ap_slong, _AP_SIZE_ap_slong, true) -REF_BIN_OP_WITH_INT_BITS(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef REF_BIN_OP_WITH_INT_BITS - -/* ap_range_ref with ap_range_ref - * ------------------------------------------------------------ - */ -#define REF_BIN_OP(BIN_OP, RTYPE) \ - template \ - INLINE \ - typename ap_int_base<_AP_W, false>::template RType<_AP_W2, false>::RTYPE \ - operator BIN_OP(const ap_range_ref<_AP_W, _AP_S>& lhs, \ - const ap_range_ref<_AP_W2, _AP_S2>& rhs) { \ - return (lhs.operator ap_int_base<_AP_W, false>())BIN_OP( \ - rhs.operator ap_int_base<_AP_W2, false>()); \ - } - -REF_BIN_OP(+, plus) -REF_BIN_OP(-, minus) -REF_BIN_OP(*, mult) -REF_BIN_OP(/, div) -REF_BIN_OP(%, mod) -REF_BIN_OP(&, logic) -REF_BIN_OP(|, logic) -REF_BIN_OP(^, logic) -REF_BIN_OP(>>, arg1) -REF_BIN_OP(<<, arg1) - -/* ap_concat_ref with ap_concat_ref. - * ------------------------------------------------------------ - */ - -//************************************************************************ -// Implement -// ap_int_base = ap_concat_ref OP ap_concat_ref -// for operators +, -, *, /, %, >>, <<, &, |, ^ -// Without these operators the operands are converted to int64 and -// larger results lose informations (higher order bits). -// -// operand OP -// / | -// left-concat right-concat -// / | / | -// -// -// _AP_LW1, _AP_LT1 (width and type of left-concat's left side) -// _AP_LW2, _AP_LT2 (width and type of left-concat's right side) -// Similarly for RHS of operand OP: _AP_RW1, AP_RW2, _AP_RT1, _AP_RT2 -// -// In Verilog 2001 result of concatenation is always unsigned even -// when both sides are signed. 
-//************************************************************************ - -#undef SYN_CONCAT_REF_BIN_OP - -#define SYN_CONCAT_REF_BIN_OP(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_LW1 + _AP_LW2, false>::template RType< \ - _AP_RW1 + _AP_RW2, false>::RTYPE \ - operator BIN_OP( \ - const ap_concat_ref<_AP_LW1, _AP_LT1, _AP_LW2, _AP_LT2>& lhs, \ - const ap_concat_ref<_AP_RW1, _AP_RT1, _AP_RW2, _AP_RT2>& rhs) { \ - return lhs.get() BIN_OP rhs.get(); \ - } - -SYN_CONCAT_REF_BIN_OP(+, plus) -SYN_CONCAT_REF_BIN_OP(-, minus) -SYN_CONCAT_REF_BIN_OP(*, mult) -SYN_CONCAT_REF_BIN_OP(/, div) -SYN_CONCAT_REF_BIN_OP(%, mod) -SYN_CONCAT_REF_BIN_OP(&, logic) -SYN_CONCAT_REF_BIN_OP(|, logic) -SYN_CONCAT_REF_BIN_OP(^, logic) -SYN_CONCAT_REF_BIN_OP(>>, arg1) -SYN_CONCAT_REF_BIN_OP(<<, arg1) - -#undef SYN_CONCAT_REF_BIN_OP - -#define CONCAT_OP_WITH_INT(C_TYPE, _AP_WI, _AP_SI) \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - const ap_int_base<_AP_W, _AP_S> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op2); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ - ret <<= _AP_WI; \ - if (_AP_SI) { \ - val <<= _AP_W; \ - val >>= _AP_W; \ - } \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - C_TYPE op1, const ap_int_base<_AP_W, _AP_S> &op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op1); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ - if (_AP_S) { \ - ret <<= _AP_WI; \ - ret >>= _AP_WI; \ - } \ - ret |= val << _AP_W; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - const ap_range_ref<_AP_W, _AP_S> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op2); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ - ret <<= _AP_WI; \ - if (_AP_SI) { \ - val <<= _AP_W; \ - val >>= _AP_W; \ - } \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - C_TYPE 
op1, const ap_range_ref<_AP_W, _AP_S> &op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op1); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ - int len = op2.length(); \ - val <<= len; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_WI + 1, false> operator,( \ - const ap_bit_ref<_AP_W, _AP_S> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + 1, false> val(op2); \ - val[_AP_WI] = op1; \ - return val; \ - } \ - template \ - INLINE ap_int_base<_AP_WI + 1, false> operator,( \ - C_TYPE op1, const ap_bit_ref<_AP_W, _AP_S> &op2) { \ - ap_int_base<_AP_WI + 1, false> val(op1); \ - val <<= 1; \ - val[0] = op2; \ - return val; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_W2 + _AP_WI, false> operator,( \ - const ap_concat_ref<_AP_W, _AP_T, _AP_W2, _AP_T2> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> val(op2); \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> ret(op1); \ - if (_AP_SI) { \ - val <<= _AP_W + _AP_W2; \ - val >>= _AP_W + _AP_W2; \ - } \ - ret <<= _AP_WI; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_W2 + _AP_WI, false> operator,( \ - C_TYPE op1, const ap_concat_ref<_AP_W, _AP_T, _AP_W2, _AP_T2> &op2) { \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> val(op1); \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> ret(op2); \ - int len = op2.length(); \ - val <<= len; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op1, \ - C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op2); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ - if (_AP_SI) { \ - val <<= _AP_W; \ - val >>= _AP_W; \ - } \ - ret <<= _AP_WI; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - C_TYPE op1, \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op2) { \ - ap_int_base<_AP_WI + _AP_W, false> 
val(op1); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ - int len = op2.length(); \ - val <<= len; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<1 + _AP_WI, false> operator,( \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op1, \ - C_TYPE op2) { \ - ap_int_base<_AP_WI + 1, _AP_SI> val(op2); \ - val[_AP_WI] = op1; \ - return val; \ - } \ - template \ - INLINE ap_int_base<1 + _AP_WI, false> operator,( \ - C_TYPE op1, \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op2) { \ - ap_int_base<_AP_WI + 1, _AP_SI> val(op1); \ - val <<= 1; \ - val[0] = op2; \ - return val; \ - } - -CONCAT_OP_WITH_INT(bool, 1, false) -CONCAT_OP_WITH_INT(char, 8, CHAR_IS_SIGNED) -CONCAT_OP_WITH_INT(signed char, 8, true) -CONCAT_OP_WITH_INT(unsigned char, 8, false) -CONCAT_OP_WITH_INT(short, _AP_SIZE_short, true) -CONCAT_OP_WITH_INT(unsigned short, _AP_SIZE_short, false) -CONCAT_OP_WITH_INT(int, _AP_SIZE_int, true) -CONCAT_OP_WITH_INT(unsigned int, _AP_SIZE_int, false) -CONCAT_OP_WITH_INT(long, _AP_SIZE_long, true) -CONCAT_OP_WITH_INT(unsigned long, _AP_SIZE_long, false) -CONCAT_OP_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -CONCAT_OP_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef CONCAT_OP_WITH_INT - -#define CONCAT_SHIFT_WITH_INT(C_TYPE, OP) \ - template \ - INLINE ap_uint<_AP_W + _AP_W1> operator OP( \ - const ap_concat_ref<_AP_W, _AP_T, _AP_W1, _AP_T1> lhs, C_TYPE rhs) { \ - return ap_uint<_AP_W + _AP_W1>(lhs).get() OP int(rhs); \ - } - -// FIXME int(rhs) may loose precision. 
- -CONCAT_SHIFT_WITH_INT(int, <<) -CONCAT_SHIFT_WITH_INT(unsigned int, <<) -CONCAT_SHIFT_WITH_INT(long, <<) -CONCAT_SHIFT_WITH_INT(unsigned long, <<) -CONCAT_SHIFT_WITH_INT(ap_slong, <<) -CONCAT_SHIFT_WITH_INT(ap_ulong, <<) - -CONCAT_SHIFT_WITH_INT(int, >>) -CONCAT_SHIFT_WITH_INT(unsigned int, >>) -CONCAT_SHIFT_WITH_INT(long, >>) -CONCAT_SHIFT_WITH_INT(unsigned long, >>) -CONCAT_SHIFT_WITH_INT(ap_slong, >>) -CONCAT_SHIFT_WITH_INT(ap_ulong, >>) - -#endif // ifndef __cplusplus -#endif // ifndef __AP_INT_REF_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/ap_int_special.h b/TrigScint/include/TrigScint/ap_int_special.h deleted file mode 100644 index 3afc6192b..000000000 --- a/TrigScint/include/TrigScint/ap_int_special.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_SPECIAL_H__ -#define __AP_INT_SPECIAL_H__ - -#ifndef __AP_INT_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __SYNTHESIS__ -#include -#include -#endif -// FIXME AP_AUTOCC cannot handle many standard headers, so declare instead of -// include. -// #include -namespace std { -template class complex; -} - -/* - TODO: Modernize the code using C++11/C++14 - 1. constexpr http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0415r0.html - 2. 
move constructor -*/ - -namespace std { -/* - Specialize std::complex to zero initialization ap_int. - - To reduce the area cost, ap_int is not zero initialized, just like basic - types float or double. However, libstdc++ provides specialization for float, - double and long double, initializing image part to 0 when not specified. - - This has become a difficulty in switching legacy code from these C types to - ap_int. To ease the tranform of legacy code, we have to implement - specialization of std::complex<> for our type. - - As ap_int is a template, it is impossible to specialize only the methods - that causes default initialization of value type in std::complex<>. An - explicit full specialization of the template class has to be done, covering - all the member functions and operators of std::complex<> as specified - in standard 26.2.4 and 26.2.5. -*/ -template -class complex > { - public: - typedef ap_int<_AP_W> _Tp; - typedef _Tp value_type; - - // 26.2.4/1 - // Constructor without argument - // Default initialize, so that in dataflow, the variable is only written once. - complex() : _M_real(_Tp()), _M_imag(_Tp()) {} - // Constructor with ap_int. - // Zero initialize image part when not specified, so that `C(1) == C(1,0)` - complex(const _Tp &__r, const _Tp &__i = _Tp(0)) - : _M_real(__r), _M_imag(__i) {} - - // Constructor with another complex number - template - complex(const complex<_Up> &__z) : _M_real(__z.real()), _M_imag(__z.imag()) {} - -#if __cplusplus >= 201103L - const _Tp& real() const { return _M_real; } - const _Tp& imag() const { return _M_imag; } -#else - _Tp& real() { return _M_real; } - const _Tp& real() const { return _M_real; } - _Tp& imag() { return _M_imag; } - const _Tp& imag() const { return _M_imag; } -#endif - - void real(_Tp __val) { _M_real = __val; } - - void imag(_Tp __val) { _M_imag = __val; } - - // Assign this complex number with ap_int. 
- // Zero initialize image poarrt, so that `C c; c = 1; c == C(1,0);` - complex<_Tp> &operator=(const _Tp __t) { - _M_real = __t; - _M_imag = _Tp(0); - return *this; - } - - // 26.2.5/1 - // Add ap_int to this complex number. - complex<_Tp> &operator+=(const _Tp &__t) { - _M_real += __t; - return *this; - } - - // 26.2.5/3 - // Subtract ap_int from this complex number. - complex<_Tp> &operator-=(const _Tp &__t) { - _M_real -= __t; - return *this; - } - - // 26.2.5/5 - // Multiply this complex number by ap_int. - complex<_Tp> &operator*=(const _Tp &__t) { - _M_real *= __t; - _M_imag *= __t; - return *this; - } - - // 26.2.5/7 - // Divide this complex number by ap_int. - complex<_Tp> &operator/=(const _Tp &__t) { - _M_real /= __t; - _M_imag /= __t; - return *this; - } - - // Assign complex number to this complex number. - template - complex<_Tp> &operator=(const complex<_Up> &__z) { - _M_real = __z.real(); - _M_imag = __z.imag(); - return *this; - } - - // 26.2.5/9 - // Add complex number to this. - template - complex<_Tp> &operator+=(const complex<_Up> &__z) { - _M_real += __z.real(); - _M_imag += __z.imag(); - return *this; - } - - // 26.2.5/11 - // Subtract complex number from this. - template - complex<_Tp> &operator-=(const complex<_Up> &__z) { - _M_real -= __z.real(); - _M_imag -= __z.imag(); - return *this; - } - - // 26.2.5/13 - // Multiply this by complex number. - template - complex<_Tp> &operator*=(const complex<_Up> &__z) { - const _Tp __r = _M_real * __z.real() - _M_imag * __z.imag(); - _M_imag = _M_real * __z.imag() + _M_imag * __z.real(); - _M_real = __r; - return *this; - } - - // 26.2.5/15 - // Divide this by complex number. 
- template - complex<_Tp> &operator/=(const complex<_Up> &__z) { - complex<_Tp> cj (__z.real(), -__z.imag()); - complex<_Tp> a = (*this) * cj; - complex<_Tp> b = cj * __z; - _M_real = a.real() / b.real(); - _M_imag = a.imag() / b.real(); - return *this; - } - - private: - _Tp _M_real; - _Tp _M_imag; - -}; // class complex > - - -/* - Non-member operations - These operations are not required by standard in 26.2.6, but libstdc++ - defines them for - float, double or long double's specialization. -*/ -// Compare complex number with ap_int. -template -inline bool operator==(const complex > &__x, const ap_int<_AP_W> &__y) { - return __x.real() == __y && - __x.imag() == 0; -} - -// Compare ap_int with complex number. -template -inline bool operator==(const ap_int<_AP_W> &__x, const complex > &__y) { - return __x == __y.real() && - 0 == __y.imag(); -} - -// Compare complex number with ap_int. -template -inline bool operator!=(const complex > &__x, const ap_int<_AP_W> &__y) { - return __x.real() != __y || - __x.imag() != 0; -} - -// Compare ap_int with complex number. 
-template -inline bool operator!=(const ap_int<_AP_W> &__x, const complex > &__y) { - return __x != __y.real() || - 0 != __y.imag(); -} - -} // namespace std - -#endif // ifndef __AP_INT_SPECIAL_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/objdef.h b/TrigScint/include/TrigScint/objdef.h index 9b3b7ac74..230cf1125 100755 --- a/TrigScint/include/TrigScint/objdef.h +++ b/TrigScint/include/TrigScint/objdef.h @@ -1,7 +1,7 @@ #ifndef OBJDEF_H -#include "TrigScint/ap_int.h" #define OBJDEF_H +#include "../../../Trigger/HLS_arbitrary_Precision_Types/include/ap_int.h" #define NTIMES 6 #define NHITS 25 #define NCLUS 25 From 896dd63d3e21489af247c874a435404892229537 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Mon, 16 Sep 2024 23:47:39 -0700 Subject: [PATCH 09/19] Commiting removal of hard coded channel numbers --- .../TrigScint/TrigScintFirmwareTracker.cxx | 20 +++++++++---------- TrigScint/src/TrigScint/trackproducer_hw.cxx | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 91814b514..63229c0dd 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -96,14 +96,14 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { const auto digis2_{ event.getCollection(digis3_collection_, passName_)}; - int occupied[50]; - for(int i = 0; i<50;i++){ + int occupied[NCHAN]; + for(int i = 0; i - minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){ + minThr_)and(digi.getBarID()<=NCHAN)and(digi.getBarID()>=0)){ ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); int index=count; @@ -125,13 +125,13 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } - for(int i = 0; i<50;i++){ + for(int i = 0; i - minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){ + 
minThr_)and(digi.getBarID()<=NCHAN)and(digi.getBarID()>=0)){ ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); int index=count; @@ -152,13 +152,13 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - for(int i = 0; i<50;i++){ + for(int i = 0; i - minThr_)and(digi.getBarID()<=50)and(digi.getBarID()>=0)){ + minThr_)and(digi.getBarID()<=NCHAN)and(digi.getBarID()>=0)){ ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); int index=count; @@ -185,7 +185,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { Cluster* Point1=clusterproducer_sw(HPad1); int topSeed=0; for(int i = 0; i30)and(Point1[i].Seed.bID<51)and(Point1[i].Seed.bID>=0)and(Point1[i].Sec.Amp<450)and(counterN30)and(Point1[i].Seed.bID<(NCHAN+1))and(Point1[i].Seed.bID>=0)and(Point1[i].Sec.Amp<450)and(counterN=topSeed){ cpyHit(Pad1[counterN].Seed,Point1[i].Seed);cpyHit(Pad1[counterN].Sec,Point1[i].Sec); calcCent(Pad1[counterN]); @@ -197,7 +197,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { Cluster* Point2=clusterproducer_sw(HPad2); topSeed=0; for(int i = 0; i30)and(Point2[i].Seed.bID<51)and(Point2[i].Seed.bID>=0)and(Point2[i].Sec.Amp<450)){ + if((Point2[i].Seed.Amp<450)and(Point2[i].Seed.Amp>30)and(Point2[i].Seed.bID<(NCHAN+1))and(Point2[i].Seed.bID>=0)and(Point2[i].Sec.Amp<450)){ if(Point2[i].Seed.bID>=topSeed){ cpyHit(Pad2[i].Seed,Point2[i].Seed);cpyHit(Pad2[i].Sec,Point2[i].Sec); calcCent(Pad2[i]); @@ -208,7 +208,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { Cluster* Point3=clusterproducer_sw(HPad3); topSeed=0; for(int i = 0; i30)and(Point3[i].Seed.bID<51)and(Point3[i].Seed.bID>=0)and(Point3[i].Sec.Amp<450)){ + if((Point3[i].Seed.Amp<450)and(Point3[i].Seed.Amp>30)and(Point3[i].Seed.bID<(NCHAN+1))and(Point3[i].Seed.bID>=0)and(Point3[i].Sec.Amp<450)){ if(Point3[i].Seed.bID>=topSeed){ 
cpyHit(Pad3[i].Seed,Point3[i].Seed);cpyHit(Pad3[i].Sec,Point3[i].Sec); calcCent(Pad3[i]); diff --git a/TrigScint/src/TrigScint/trackproducer_hw.cxx b/TrigScint/src/TrigScint/trackproducer_hw.cxx index 39de952c7..7cf0ef66d 100755 --- a/TrigScint/src/TrigScint/trackproducer_hw.cxx +++ b/TrigScint/src/TrigScint/trackproducer_hw.cxx @@ -15,7 +15,7 @@ void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS] #pragma HLS ARRAY_PARTITION variable=test complete for(int i = 0;i100){continue;} + if(2*Pad1[i].Seed.bID>2*NCHAN){continue;} for(int I = 0;I0)){continue;}//Continue if Seed not Satisfied From 42ed26b094cd2a96a6743ff0fbf482f38e0f5ade Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Tue, 17 Sep 2024 08:40:15 -0700 Subject: [PATCH 10/19] Forgot to add some changes that we unstagged w.r.t. the comments and CMakeFix. I haven't included all of the class name changes (capitalizations) I may eventually include --- TrigScint/CMakeLists.txt | 2 +- TrigScint/include/TrigScint/etc/ap_private.h | 7199 ----------------- TrigScint/include/TrigScint/objdef.h | 2 +- .../TrigScint/TrigScintFirmwareTracker.cxx | 45 +- .../src/TrigScint/clusterproducer_sw.cxx | 4 + TrigScint/src/TrigScint/trackproducer_hw.cxx | 15 +- 6 files changed, 52 insertions(+), 7215 deletions(-) delete mode 100644 TrigScint/include/TrigScint/etc/ap_private.h diff --git a/TrigScint/CMakeLists.txt b/TrigScint/CMakeLists.txt index 2d1a84735..33c12f323 100644 --- a/TrigScint/CMakeLists.txt +++ b/TrigScint/CMakeLists.txt @@ -44,6 +44,6 @@ setup_library(module TrigScint dependencies Framework::Framework Recon::Event DetDescr::DetDescr Tools::Tools SimCore::Event ) - +target_include_directories(TrigScint PUBLIC ../Trigger/HLS_arbitrary_Precision_Types/include) setup_python(package_name LDMX/TrigScint) diff --git a/TrigScint/include/TrigScint/etc/ap_private.h b/TrigScint/include/TrigScint/etc/ap_private.h deleted file mode 100644 index 0c29a0ac1..000000000 --- 
a/TrigScint/include/TrigScint/etc/ap_private.h +++ /dev/null @@ -1,7199 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_PRIVATE_H__ -#define __AP_PRIVATE_H__ - -// common macros and type declarations are now defined in ap_common.h, and -// ap_private becomes part of it. -#ifndef __AP_COMMON_H__ -#error "etc/ap_private.h cannot be included directly." -#endif - -// forward declarations -//template -//class ap_private; // moved to ap_common.h -template -struct _private_range_ref; -template -struct _private_bit_ref; - -// TODO clean up this part. -#ifndef LLVM_SUPPORT_MATHEXTRAS_H -#define LLVM_SUPPORT_MATHEXTRAS_H - -#ifdef _MSC_VER -#if _MSC_VER <= 1500 -typedef __int8 int8_t; -typedef unsigned __int8 uint8_t; -typedef __int16 int16_t; -typedef unsigned __int16 uint16_t; -typedef __int32 int32_t; -typedef unsigned __int32 uint32_t; -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -#else -#include -#endif -#else -#include -#endif - -#ifndef INLINE -#define INLINE inline -// Enable to debug ap_int/ap_fixed -// #define INLINE __attribute__((weak)) -#endif - -// NOTE: The following support functions use the _32/_64 extensions instead of -// type overloading so that signed and unsigned integers can be used without -// ambiguity. -namespace AESL_std { -template -DataType INLINE min(DataType a, DataType b) { - return (a >= b) ? 
b : a; -} - -template -DataType INLINE max(DataType a, DataType b) { - return (a >= b) ? a : b; -} -} // namespace AESL_std - -// TODO clean up included headers. -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ap_private_ops { -/// Hi_32 - This function returns the high 32 bits of a 64 bit value. -static INLINE uint32_t Hi_32(uint64_t Value) { - return static_cast(Value >> 32); -} - -/// Lo_32 - This function returns the low 32 bits of a 64 bit value. -static INLINE uint32_t Lo_32(uint64_t Value) { - return static_cast(Value); -} - -template -INLINE bool isNegative(const ap_private<_AP_W, false>& a) { - return false; -} - -template -INLINE bool isNegative(const ap_private<_AP_W, true>& a) { - enum { - APINT_BITS_PER_WORD = 64, - _AP_N = (_AP_W + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD - }; - static const uint64_t sign_mask = 1ULL << ((_AP_W - 1) % APINT_BITS_PER_WORD); - return (sign_mask & a.get_pVal(_AP_N - 1)) != 0; -} - -/// CountLeadingZeros_32 - this function performs the platform optimal form of -/// counting the number of zeros from the most significant bit to the first one -/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8. -/// Returns 32 if the word is zero. -static INLINE unsigned CountLeadingZeros_32(uint32_t Value) { - unsigned Count; // result -#if __GNUC__ >= 4 -// PowerPC is defined for __builtin_clz(0) -#if !defined(__ppc__) && !defined(__ppc64__) - if (Value == 0) return 32; -#endif - Count = __builtin_clz(Value); -#else - if (Value == 0) return 32; - Count = 0; - // bisecton method for count leading zeros - for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) { - uint32_t Tmp = (Value) >> (Shift); - if (Tmp) { - Value = Tmp; - } else { - Count |= Shift; - } - } -#endif - return Count; -} - -/// CountLeadingZeros_64 - This function performs the platform optimal form -/// of counting the number of zeros from the most significant bit to the first -/// one bit (64 bit edition.) 
-/// Returns 64 if the word is zero. -static INLINE unsigned CountLeadingZeros_64(uint64_t Value) { - unsigned Count; // result -#if __GNUC__ >= 4 -// PowerPC is defined for __builtin_clzll(0) -#if !defined(__ppc__) && !defined(__ppc64__) - if (!Value) return 64; -#endif - Count = __builtin_clzll(Value); -#else - if (sizeof(long) == sizeof(int64_t)) { - if (!Value) return 64; - Count = 0; - // bisecton method for count leading zeros - for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) { - uint64_t Tmp = (Value) >> (Shift); - if (Tmp) { - Value = Tmp; - } else { - Count |= Shift; - } - } - } else { - // get hi portion - uint32_t Hi = Hi_32(Value); - - // if some bits in hi portion - if (Hi) { - // leading zeros in hi portion plus all bits in lo portion - Count = CountLeadingZeros_32(Hi); - } else { - // get lo portion - uint32_t Lo = Lo_32(Value); - // same as 32 bit value - Count = CountLeadingZeros_32(Lo) + 32; - } - } -#endif - return Count; -} - -/// CountTrailingZeros_64 - This function performs the platform optimal form -/// of counting the number of zeros from the least significant bit to the first -/// one bit (64 bit edition.) -/// Returns 64 if the word is zero. -static INLINE unsigned CountTrailingZeros_64(uint64_t Value) { -#if __GNUC__ >= 4 - return (Value != 0) ? __builtin_ctzll(Value) : 64; -#else - static const unsigned Mod67Position[] = { - 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54, 4, - 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55, 47, 5, 32, - 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27, 29, 50, 43, 46, 31, - 37, 21, 57, 52, 8, 26, 49, 45, 36, 56, 7, 48, 35, 6, 34, 33, 0}; - return Mod67Position[(uint64_t)(-(int64_t)Value & (int64_t)Value) % 67]; -#endif -} - -/// CountPopulation_64 - this function counts the number of set bits in a value, -/// (64 bit edition.) 
-static INLINE unsigned CountPopulation_64(uint64_t Value) { -#if __GNUC__ >= 4 - return __builtin_popcountll(Value); -#else - uint64_t v = Value - (((Value) >> 1) & 0x5555555555555555ULL); - v = (v & 0x3333333333333333ULL) + (((v) >> 2) & 0x3333333333333333ULL); - v = (v + ((v) >> 4)) & 0x0F0F0F0F0F0F0F0FULL; - return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); -#endif -} - -static INLINE uint32_t countLeadingOnes_64(uint64_t __V, uint32_t skip) { - uint32_t Count = 0; - if (skip) (__V) <<= (skip); - while (__V && (__V & (1ULL << 63))) { - Count++; - (__V) <<= 1; - } - return Count; -} - -static INLINE std::string oct2Bin(char oct) { - switch (oct) { - case '\0': { - return ""; - } - case '.': { - return "."; - } - case '0': { - return "000"; - } - case '1': { - return "001"; - } - case '2': { - return "010"; - } - case '3': { - return "011"; - } - case '4': { - return "100"; - } - case '5': { - return "101"; - } - case '6': { - return "110"; - } - case '7': { - return "111"; - } - } - assert(0 && "Invalid character in digit string"); - return ""; -} - -static INLINE std::string hex2Bin(char hex) { - switch (hex) { - case '\0': { - return ""; - } - case '.': { - return "."; - } - case '0': { - return "0000"; - } - case '1': { - return "0001"; - } - case '2': { - return "0010"; - } - case '3': { - return "0011"; - } - case '4': { - return "0100"; - } - case '5': { - return "0101"; - } - case '6': { - return "0110"; - } - case '7': { - return "0111"; - } - case '8': { - return "1000"; - } - case '9': { - return "1001"; - } - case 'A': - case 'a': { - return "1010"; - } - case 'B': - case 'b': { - return "1011"; - } - case 'C': - case 'c': { - return "1100"; - } - case 'D': - case 'd': { - return "1101"; - } - case 'E': - case 'e': { - return "1110"; - } - case 'F': - case 'f': { - return "1111"; - } - } - assert(0 && "Invalid character in digit string"); - return ""; -} - -static INLINE uint32_t decode_digit(char cdigit, int radix) { - uint32_t digit = 0; 
- if (radix == 16) { -#define isxdigit(c) \ - (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || \ - ((c) >= 'A' && (c) <= 'F')) -#define isdigit(c) ((c) >= '0' && (c) <= '9') - if (!isxdigit(cdigit)) assert(0 && "Invalid hex digit in string"); - if (isdigit(cdigit)) - digit = cdigit - '0'; - else if (cdigit >= 'a') - digit = cdigit - 'a' + 10; - else if (cdigit >= 'A') - digit = cdigit - 'A' + 10; - else - assert(0 && "huh? we shouldn't get here"); - } else if (isdigit(cdigit)) { - digit = cdigit - '0'; - } else { - assert(0 && "Invalid character in digit string"); - } -#undef isxdigit -#undef isdigit - return digit; -} - -// Determine the radix of "val". -static INLINE std::string parseString(const std::string& input, unsigned char& radix) { - size_t len = input.length(); - if (len == 0) { - if (radix == 0) radix = 10; - return input; - } - - size_t startPos = 0; - // Trim whitespace - while (input[startPos] == ' ' && startPos < len) startPos++; - while (input[len - 1] == ' ' && startPos < len) len--; - - std::string val = input.substr(startPos, len - startPos); - // std::cout << "val = " << val << "\n"; - len = val.length(); - startPos = 0; - - // If the length of the string is less than 2, then radix - // is decimal and there is no exponent. - if (len < 2) { - if (radix == 0) radix = 10; - return val; - } - - bool isNegative = false; - std::string ans; - - // First check to see if we start with a sign indicator - if (val[0] == '-') { - ans = "-"; - ++startPos; - isNegative = true; - } else if (val[0] == '+') - ++startPos; - - if (len - startPos < 2) { - if (radix == 0) radix = 10; - return val; - } - - if (val.substr(startPos, 2) == "0x" || val.substr(startPos, 2) == "0X") { - // If we start with "0x", then the radix is hex. - radix = 16; - startPos += 2; - } else if (val.substr(startPos, 2) == "0b" || - val.substr(startPos, 2) == "0B") { - // If we start with "0b", then the radix is binary. 
- radix = 2; - startPos += 2; - } else if (val.substr(startPos, 2) == "0o" || - val.substr(startPos, 2) == "0O") { - // If we start with "0o", then the radix is octal. - radix = 8; - startPos += 2; - } else if (radix == 0) { - radix = 10; - } - - int exp = 0; - if (radix == 10) { - // If radix is decimal, then see if there is an - // exponent indicator. - size_t expPos = val.find('e'); - bool has_exponent = true; - if (expPos == std::string::npos) expPos = val.find('E'); - if (expPos == std::string::npos) { - // No exponent indicator, so the mantissa goes to the end. - expPos = len; - has_exponent = false; - } - // std::cout << "startPos = " << startPos << " " << expPos << "\n"; - - ans += val.substr(startPos, expPos - startPos); - if (has_exponent) { - // Parse the exponent. - std::istringstream iss(val.substr(expPos + 1, len - expPos - 1)); - iss >> exp; - } - } else { - // Check for a binary exponent indicator. - size_t expPos = val.find('p'); - bool has_exponent = true; - if (expPos == std::string::npos) expPos = val.find('P'); - if (expPos == std::string::npos) { - // No exponent indicator, so the mantissa goes to the end. - expPos = len; - has_exponent = false; - } - - // std::cout << "startPos = " << startPos << " " << expPos << "\n"; - - assert(startPos <= expPos); - // Convert to binary as we go. - for (size_t i = startPos; i < expPos; ++i) { - if (radix == 16) { - ans += hex2Bin(val[i]); - } else if (radix == 8) { - ans += oct2Bin(val[i]); - } else { // radix == 2 - ans += val[i]; - } - } - // End in binary - radix = 2; - if (has_exponent) { - // Parse the exponent. 
- std::istringstream iss(val.substr(expPos + 1, len - expPos - 1)); - iss >> exp; - } - } - if (exp == 0) return ans; - - size_t decPos = ans.find('.'); - if (decPos == std::string::npos) decPos = ans.length(); - if ((int)decPos + exp >= (int)ans.length()) { - int i = decPos; - for (; i < (int)ans.length() - 1; ++i) ans[i] = ans[i + 1]; - for (; i < (int)ans.length(); ++i) ans[i] = '0'; - for (; i < (int)decPos + exp; ++i) ans += '0'; - return ans; - } else if ((int)decPos + exp < (int)isNegative) { - std::string dupAns = "0."; - if (ans[0] == '-') dupAns = "-0."; - for (int i = 0; i < isNegative - (int)decPos - exp; ++i) dupAns += '0'; - for (size_t i = isNegative; i < ans.length(); ++i) - if (ans[i] != '.') dupAns += ans[i]; - return dupAns; - } - - if (exp > 0) - for (size_t i = decPos; i < decPos + exp; ++i) ans[i] = ans[i + 1]; - else { - if (decPos == ans.length()) ans += ' '; - for (int i = decPos; i > (int)decPos + exp; --i) ans[i] = ans[i - 1]; - } - ans[decPos + exp] = '.'; - return ans; -} - -/// sub_1 - This function subtracts a single "digit" (64-bit word), y, from -/// the multi-digit integer array, x[], propagating the borrowed 1 value until -/// no further borrowing is neeeded or it runs out of "digits" in x. The result -/// is 1 if "borrowing" exhausted the digits in x, or 0 if x was not exhausted. -/// In other words, if y > x then this function returns 1, otherwise 0. -/// @returns the borrow out of the subtraction -static INLINE bool sub_1(uint64_t x[], uint32_t len, uint64_t y) { - for (uint32_t i = 0; i < len; ++i) { - uint64_t __X = x[i]; - x[i] -= y; - if (y > __X) - y = 1; // We have to "borrow 1" from next "digit" - else { - y = 0; // No need to borrow - break; // Remaining digits are unchanged so exit early - } - } - return (y != 0); -} - -/// add_1 - This function adds a single "digit" integer, y, to the multiple -/// "digit" integer array, x[]. 
x[] is modified to reflect the addition and -/// 1 is returned if there is a carry out, otherwise 0 is returned. -/// @returns the carry of the addition. -static INLINE bool add_1(uint64_t dest[], uint64_t x[], uint32_t len, - uint64_t y) { - for (uint32_t i = 0; i < len; ++i) { - dest[i] = y + x[i]; - if (dest[i] < y) - y = 1; // Carry one to next digit. - else { - y = 0; // No need to carry so exit early - break; - } - } - return (y != 0); -} - -/// add - This function adds the integer array x to the integer array Y and -/// places the result in dest. -/// @returns the carry out from the addition -/// @brief General addition of 64-bit integer arrays -static INLINE bool add(uint64_t* dest, const uint64_t* x, const uint64_t* y, - uint32_t destlen, uint32_t xlen, uint32_t ylen, - bool xsigned, bool ysigned) { - bool carry = false; - uint32_t len = AESL_std::min(xlen, ylen); - uint32_t i; - for (i = 0; i < len && i < destlen; ++i) { - uint64_t limit = - AESL_std::min(x[i], y[i]); // must come first in case dest == x - dest[i] = x[i] + y[i] + carry; - carry = dest[i] < limit || (carry && dest[i] == limit); - } - if (xlen > ylen) { - const uint64_t yext = ysigned && int64_t(y[ylen - 1]) < 0 ? -1 : 0; - for (i = ylen; i < xlen && i < destlen; i++) { - uint64_t limit = AESL_std::min(x[i], yext); - dest[i] = x[i] + yext + carry; - carry = (dest[i] < limit) || (carry && dest[i] == limit); - } - } else if (ylen > xlen) { - const uint64_t xext = xsigned && int64_t(x[xlen - 1]) < 0 ? -1 : 0; - for (i = xlen; i < ylen && i < destlen; i++) { - uint64_t limit = AESL_std::min(xext, y[i]); - dest[i] = xext + y[i] + carry; - carry = (dest[i] < limit) || (carry && dest[i] == limit); - } - } - return carry; -} - -/// @returns returns the borrow out. -/// @brief Generalized subtraction of 64-bit integer arrays. 
-static INLINE bool sub(uint64_t* dest, const uint64_t* x, const uint64_t* y, - uint32_t destlen, uint32_t xlen, uint32_t ylen, - bool xsigned, bool ysigned) { - bool borrow = false; - uint32_t i; - uint32_t len = AESL_std::min(xlen, ylen); - for (i = 0; i < len && i < destlen; ++i) { - uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; - borrow = y[i] > x_tmp || (borrow && x[i] == 0); - dest[i] = x_tmp - y[i]; - } - if (xlen > ylen) { - const uint64_t yext = ysigned && int64_t(y[ylen - 1]) < 0 ? -1 : 0; - for (i = ylen; i < xlen && i < destlen; i++) { - uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; - borrow = yext > x_tmp || (borrow && x[i] == 0); - dest[i] = x_tmp - yext; - } - } else if (ylen > xlen) { - const uint64_t xext = xsigned && int64_t(x[xlen - 1]) < 0 ? -1 : 0; - for (i = xlen; i < ylen && i < destlen; i++) { - uint64_t x_tmp = borrow ? xext - 1 : xext; - borrow = y[i] > x_tmp || (borrow && xext == 0); - dest[i] = x_tmp - y[i]; - } - } - return borrow; -} - -/// Subtracts the RHS ap_private from this ap_private -/// @returns this, after subtraction -/// @brief Subtraction assignment operator. - -/// Multiplies an integer array, x by a a uint64_t integer and places the result -/// into dest. -/// @returns the carry out of the multiplication. -/// @brief Multiply a multi-digit ap_private by a single digit (64-bit) integer. -static INLINE uint64_t mul_1(uint64_t dest[], const uint64_t x[], uint32_t len, - uint64_t y) { - // Split y into high 32-bit part (hy) and low 32-bit part (ly) - uint64_t ly = y & 0xffffffffULL, hy = (y) >> 32; - uint64_t carry = 0; - static const uint64_t two_power_32 = 1ULL << 32; - // For each digit of x. - for (uint32_t i = 0; i < len; ++i) { - // Split x into high and low words - uint64_t lx = x[i] & 0xffffffffULL; - uint64_t hx = (x[i]) >> 32; - // hasCarry - A flag to indicate if there is a carry to the next digit. - // hasCarry == 0, no carry - // hasCarry == 1, has carry - // hasCarry == 2, no carry and the calculation result == 0. 
- uint8_t hasCarry = 0; - dest[i] = carry + lx * ly; - // Determine if the add above introduces carry. - hasCarry = (dest[i] < carry) ? 1 : 0; - carry = hx * ly + ((dest[i]) >> 32) + (hasCarry ? two_power_32 : 0); - // The upper limit of carry can be (2^32 - 1)(2^32 - 1) + - // (2^32 - 1) + 2^32 = 2^64. - hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0); - - carry += (lx * hy) & 0xffffffffULL; - dest[i] = ((carry) << 32) | (dest[i] & 0xffffffffULL); - carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? two_power_32 : 0) + - ((carry) >> 32) + ((lx * hy) >> 32) + hx * hy; - } - return carry; -} - -/// Multiplies integer array x by integer array y and stores the result into -/// the integer array dest. Note that dest's size must be >= xlen + ylen in -/// order to -/// do a full precision computation. If it is not, then only the low-order words -/// are returned. -/// @brief Generalized multiplicate of integer arrays. -static INLINE void mul(uint64_t dest[], const uint64_t x[], uint32_t xlen, - const uint64_t y[], uint32_t ylen, uint32_t destlen) { - assert(xlen > 0); - assert(ylen > 0); - assert(destlen >= xlen + ylen); - if (xlen < destlen) dest[xlen] = mul_1(dest, x, xlen, y[0]); - for (uint32_t i = 1; i < ylen; ++i) { - uint64_t ly = y[i] & 0xffffffffULL, hy = (y[i]) >> 32; - uint64_t carry = 0, lx = 0, hx = 0; - for (uint32_t j = 0; j < xlen; ++j) { - lx = x[j] & 0xffffffffULL; - hx = (x[j]) >> 32; - // hasCarry - A flag to indicate if has carry. - // hasCarry == 0, no carry - // hasCarry == 1, has carry - // hasCarry == 2, no carry and the calculation result == 0. - uint8_t hasCarry = 0; - uint64_t resul = carry + lx * ly; - hasCarry = (resul < carry) ? 1 : 0; - carry = (hasCarry ? (1ULL << 32) : 0) + hx * ly + ((resul) >> 32); - hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 
2 : 0); - carry += (lx * hy) & 0xffffffffULL; - resul = ((carry) << 32) | (resul & 0xffffffffULL); - if (i + j < destlen) dest[i + j] += resul; - carry = - (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0) + - ((carry) >> 32) + (dest[i + j] < resul ? 1 : 0) + ((lx * hy) >> 32) + - hx * hy; - } - if (i + xlen < destlen) dest[i + xlen] = carry; - } -} - -/// Implementation of Knuth's Algorithm D (Division of nonnegative integers) -/// from "Art of Computer Programming, Volume 2", section 4.3.1, p. 272. The -/// variables here have the same names as in the algorithm. Comments explain -/// the algorithm and any deviation from it. -static INLINE void KnuthDiv(uint32_t* u, uint32_t* v, uint32_t* q, uint32_t* r, - uint32_t m, uint32_t n) { - assert(u && "Must provide dividend"); - assert(v && "Must provide divisor"); - assert(q && "Must provide quotient"); - assert(u != v && u != q && v != q && "Must us different memory"); - assert(n > 1 && "n must be > 1"); - - // Knuth uses the value b as the base of the number system. In our case b - // is 2^31 so we just set it to -1u. - uint64_t b = uint64_t(1) << 32; - - // DEBUG(cerr << "KnuthDiv: m=" << m << " n=" << n << '\n'); - // DEBUG(cerr << "KnuthDiv: original:"); - // DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << std::setbase(16) << - // u[i]); - // DEBUG(cerr << " by"); - // DEBUG(for (int i = n; i >0; i--) cerr << " " << std::setbase(16) << - // v[i-1]); - // DEBUG(cerr << '\n'); - // D1. [Normalize.] Set d = b / (v[n-1] + 1) and multiply all the digits of - // u and v by d. Note that we have taken Knuth's advice here to use a power - // of 2 value for d such that d * v[n-1] >= b/2 (b is the base). A power of - // 2 allows us to shift instead of multiply and it is easy to determine the - // shift amount from the leading zeros. We are basically normalizing the u - // and v so that its high bits are shifted to the top of v's range without - // overflow. 
Note that this can require an extra word in u so that u must - // be of length m+n+1. - uint32_t shift = CountLeadingZeros_32(v[n - 1]); - uint32_t v_carry = 0; - uint32_t u_carry = 0; - if (shift) { - for (uint32_t i = 0; i < m + n; ++i) { - uint32_t u_tmp = (u[i]) >> (32 - shift); - u[i] = ((u[i]) << (shift)) | u_carry; - u_carry = u_tmp; - } - for (uint32_t i = 0; i < n; ++i) { - uint32_t v_tmp = (v[i]) >> (32 - shift); - v[i] = ((v[i]) << (shift)) | v_carry; - v_carry = v_tmp; - } - } - u[m + n] = u_carry; - // DEBUG(cerr << "KnuthDiv: normal:"); - // DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << std::setbase(16) << - // u[i]); - // DEBUG(cerr << " by"); - // DEBUG(for (int i = n; i >0; i--) cerr << " " << std::setbase(16) << - // v[i-1]); - // DEBUG(cerr << '\n'); - - // D2. [Initialize j.] Set j to m. This is the loop counter over the places. - int j = m; - do { - // DEBUG(cerr << "KnuthDiv: quotient digit #" << j << '\n'); - // D3. [Calculate q'.]. - // Set qp = (u[j+n]*b + u[j+n-1]) / v[n-1]. (qp=qprime=q') - // Set rp = (u[j+n]*b + u[j+n-1]) % v[n-1]. (rp=rprime=r') - // Now test if qp == b or qp*v[n-2] > b*rp + u[j+n-2]; if so, decrease - // qp by 1, inrease rp by v[n-1], and repeat this test if rp < b. The test - // on v[n-2] determines at high speed most of the cases in which the trial - // value qp is one too large, and it eliminates all cases where qp is two - // too large. - uint64_t dividend = ((uint64_t(u[j + n]) << 32) + u[j + n - 1]); - // DEBUG(cerr << "KnuthDiv: dividend == " << dividend << '\n'); - uint64_t qp = dividend / v[n - 1]; - uint64_t rp = dividend % v[n - 1]; - if (qp == b || qp * v[n - 2] > b * rp + u[j + n - 2]) { - qp--; - rp += v[n - 1]; - if (rp < b && (qp == b || qp * v[n - 2] > b * rp + u[j + n - 2])) qp--; - } - // DEBUG(cerr << "KnuthDiv: qp == " << qp << ", rp == " << rp << '\n'); - - // D4. [Multiply and subtract.] Replace (u[j+n]u[j+n-1]...u[j]) with - // (u[j+n]u[j+n-1]..u[j]) - qp * (v[n-1]...v[1]v[0]). 
This computation - // consists of a simple multiplication by a one-place number, combined with - // a subtraction. - bool isNeg = false; - for (uint32_t i = 0; i < n; ++i) { - uint64_t u_tmp = uint64_t(u[j + i]) | ((uint64_t(u[j + i + 1])) << 32); - uint64_t subtrahend = uint64_t(qp) * uint64_t(v[i]); - bool borrow = subtrahend > u_tmp; - /*DEBUG(cerr << "KnuthDiv: u_tmp == " << u_tmp - << ", subtrahend == " << subtrahend - << ", borrow = " << borrow << '\n');*/ - - uint64_t result = u_tmp - subtrahend; - uint32_t k = j + i; - u[k++] = (uint32_t)(result & (b - 1)); // subtract low word - u[k++] = (uint32_t)((result) >> 32); // subtract high word - while (borrow && k <= m + n) { // deal with borrow to the left - borrow = u[k] == 0; - u[k]--; - k++; - } - isNeg |= borrow; - /*DEBUG(cerr << "KnuthDiv: u[j+i] == " << u[j+i] << ", u[j+i+1] == " << - u[j+i+1] << '\n');*/ - } - /*DEBUG(cerr << "KnuthDiv: after subtraction:"); - DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << u[i]); - DEBUG(cerr << '\n');*/ - // The digits (u[j+n]...u[j]) should be kept positive; if the result of - // this step is actually negative, (u[j+n]...u[j]) should be left as the - // true value plus b**(n+1), namely as the b's complement of - // the true value, and a "borrow" to the left should be remembered. - // - if (isNeg) { - bool carry = true; // true because b's complement is "complement + 1" - for (uint32_t i = 0; i <= m + n; ++i) { - u[i] = ~u[i] + carry; // b's complement - carry = carry && u[i] == 0; - } - } - /*DEBUG(cerr << "KnuthDiv: after complement:"); - DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << u[i]); - DEBUG(cerr << '\n');*/ - - // D5. [Test remainder.] Set q[j] = qp. If the result of step D4 was - // negative, go to step D6; otherwise go on to step D7. - q[j] = (uint32_t)qp; - if (isNeg) { - // D6. [Add back]. The probability that this step is necessary is very - // small, on the order of only 2/b. Make sure that test data accounts for - // this possibility. 
Decrease q[j] by 1 - q[j]--; - // and add (0v[n-1]...v[1]v[0]) to (u[j+n]u[j+n-1]...u[j+1]u[j]). - // A carry will occur to the left of u[j+n], and it should be ignored - // since it cancels with the borrow that occurred in D4. - bool carry = false; - for (uint32_t i = 0; i < n; i++) { - uint32_t limit = AESL_std::min(u[j + i], v[i]); - u[j + i] += v[i] + carry; - carry = u[j + i] < limit || (carry && u[j + i] == limit); - } - u[j + n] += carry; - } - /*DEBUG(cerr << "KnuthDiv: after correction:"); - DEBUG(for (int i = m+n; i >=0; i--) cerr <<" " << u[i]); - DEBUG(cerr << "\nKnuthDiv: digit result = " << q[j] << '\n');*/ - - // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3. - } while (--j >= 0); - - /*DEBUG(cerr << "KnuthDiv: quotient:"); - DEBUG(for (int i = m; i >=0; i--) cerr <<" " << q[i]); - DEBUG(cerr << '\n');*/ - - // D8. [Unnormalize]. Now q[...] is the desired quotient, and the desired - // remainder may be obtained by dividing u[...] by d. If r is non-null we - // compute the remainder (urem uses this). - if (r) { - // The value d is expressed by the "shift" value above since we avoided - // multiplication by d by using a shift left. So, all we have to do is - // shift right here. 
In order to mak - if (shift) { - uint32_t carry = 0; - // DEBUG(cerr << "KnuthDiv: remainder:"); - for (int i = n - 1; i >= 0; i--) { - r[i] = ((u[i]) >> (shift)) | carry; - carry = (u[i]) << (32 - shift); - // DEBUG(cerr << " " << r[i]); - } - } else { - for (int i = n - 1; i >= 0; i--) { - r[i] = u[i]; - // DEBUG(cerr << " " << r[i]); - } - } - // DEBUG(cerr << '\n'); - } - // DEBUG(cerr << std::setbase(10) << '\n'); -} - -template -void divide(const ap_private<_AP_W, _AP_S>& LHS, uint32_t lhsWords, - const ap_private<_AP_W, _AP_S>& RHS, uint32_t rhsWords, - ap_private<_AP_W, _AP_S>* Quotient, - ap_private<_AP_W, _AP_S>* Remainder) { - assert(lhsWords >= rhsWords && "Fractional result"); - enum { APINT_BITS_PER_WORD = 64 }; - // First, compose the values into an array of 32-bit words instead of - // 64-bit words. This is a necessity of both the "short division" algorithm - // and the the Knuth "classical algorithm" which requires there to be native - // operations for +, -, and * on an m bit value with an m*2 bit result. We - // can't use 64-bit operands here because we don't have native results of - // 128-bits. Furthremore, casting the 64-bit values to 32-bit values won't - // work on large-endian machines. - uint64_t mask = ~0ull >> (sizeof(uint32_t) * 8); - uint32_t n = rhsWords * 2; - uint32_t m = (lhsWords * 2) - n; - - // Allocate space for the temporary values we need either on the stack, if - // it will fit, or on the heap if it won't. - uint32_t SPACE[128]; - uint32_t* __U = 0; - uint32_t* __V = 0; - uint32_t* __Q = 0; - uint32_t* __R = 0; - if ((Remainder ? 
4 : 3) * n + 2 * m + 1 <= 128) { - __U = &SPACE[0]; - __V = &SPACE[m + n + 1]; - __Q = &SPACE[(m + n + 1) + n]; - if (Remainder) __R = &SPACE[(m + n + 1) + n + (m + n)]; - } else { - __U = new uint32_t[m + n + 1]; - __V = new uint32_t[n]; - __Q = new uint32_t[m + n]; - if (Remainder) __R = new uint32_t[n]; - } - - // Initialize the dividend - memset(__U, 0, (m + n + 1) * sizeof(uint32_t)); - for (unsigned i = 0; i < lhsWords; ++i) { - uint64_t tmp = LHS.get_pVal(i); - __U[i * 2] = (uint32_t)(tmp & mask); - __U[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); - } - __U[m + n] = 0; // this extra word is for "spill" in the Knuth algorithm. - - // Initialize the divisor - memset(__V, 0, (n) * sizeof(uint32_t)); - for (unsigned i = 0; i < rhsWords; ++i) { - uint64_t tmp = RHS.get_pVal(i); - __V[i * 2] = (uint32_t)(tmp & mask); - __V[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); - } - - // initialize the quotient and remainder - memset(__Q, 0, (m + n) * sizeof(uint32_t)); - if (Remainder) memset(__R, 0, n * sizeof(uint32_t)); - - // Now, adjust m and n for the Knuth division. n is the number of words in - // the divisor. m is the number of words by which the dividend exceeds the - // divisor (i.e. m+n is the length of the dividend). These sizes must not - // contain any zero words or the Knuth algorithm fails. - for (unsigned i = n; i > 0 && __V[i - 1] == 0; i--) { - n--; - m++; - } - for (unsigned i = m + n; i > 0 && __U[i - 1] == 0; i--) m--; - - // If we're left with only a single word for the divisor, Knuth doesn't work - // so we implement the short division algorithm here. This is much simpler - // and faster because we are certain that we can divide a 64-bit quantity - // by a 32-bit quantity at hardware speed and short division is simply a - // series of such operations. This is just like doing short division but we - // are using base 2^32 instead of base 10. 
- assert(n != 0 && "Divide by zero?"); - if (n == 1) { - uint32_t divisor = __V[0]; - uint32_t remainder = 0; - for (int i = m + n - 1; i >= 0; i--) { - uint64_t partial_dividend = (uint64_t(remainder)) << 32 | __U[i]; - if (partial_dividend == 0) { - __Q[i] = 0; - remainder = 0; - } else if (partial_dividend < divisor) { - __Q[i] = 0; - remainder = (uint32_t)partial_dividend; - } else if (partial_dividend == divisor) { - __Q[i] = 1; - remainder = 0; - } else { - __Q[i] = (uint32_t)(partial_dividend / divisor); - remainder = (uint32_t)(partial_dividend - (__Q[i] * divisor)); - } - } - if (__R) __R[0] = remainder; - } else { - // Now we're ready to invoke the Knuth classical divide algorithm. In this - // case n > 1. - KnuthDiv(__U, __V, __Q, __R, m, n); - } - - // If the caller wants the quotient - if (Quotient) { - // Set up the Quotient value's memory. - if (Quotient->BitWidth != LHS.BitWidth) { - if (Quotient->isSingleWord()) Quotient->set_VAL(0); - } else - Quotient->clear(); - - // The quotient is in Q. Reconstitute the quotient into Quotient's low - // order words. - if (lhsWords == 1) { - uint64_t tmp = - uint64_t(__Q[0]) | ((uint64_t(__Q[1])) << (APINT_BITS_PER_WORD / 2)); - Quotient->set_VAL(tmp); - } else { - assert(!Quotient->isSingleWord() && - "Quotient ap_private not large enough"); - for (unsigned i = 0; i < lhsWords; ++i) - Quotient->set_pVal( - i, uint64_t(__Q[i * 2]) | - ((uint64_t(__Q[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Quotient->clearUnusedBits(); - } - - // If the caller wants the remainder - if (Remainder) { - // Set up the Remainder value's memory. - if (Remainder->BitWidth != RHS.BitWidth) { - if (Remainder->isSingleWord()) Remainder->set_VAL(0); - } else - Remainder->clear(); - - // The remainder is in R. Reconstitute the remainder into Remainder's low - // order words. 
- if (rhsWords == 1) { - uint64_t tmp = - uint64_t(__R[0]) | ((uint64_t(__R[1])) << (APINT_BITS_PER_WORD / 2)); - Remainder->set_VAL(tmp); - } else { - assert(!Remainder->isSingleWord() && - "Remainder ap_private not large enough"); - for (unsigned i = 0; i < rhsWords; ++i) - Remainder->set_pVal( - i, uint64_t(__R[i * 2]) | - ((uint64_t(__R[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Remainder->clearUnusedBits(); - } - - // Clean up the memory we allocated. - if (__U != &SPACE[0]) { - delete[] __U; - delete[] __V; - delete[] __Q; - delete[] __R; - } -} - -template -void divide(const ap_private<_AP_W, _AP_S>& LHS, uint32_t lhsWords, - uint64_t RHS, ap_private<_AP_W, _AP_S>* Quotient, - ap_private<_AP_W, _AP_S>* Remainder) { - uint32_t rhsWords = 1; - assert(lhsWords >= rhsWords && "Fractional result"); - enum { APINT_BITS_PER_WORD = 64 }; - // First, compose the values into an array of 32-bit words instead of - // 64-bit words. This is a necessity of both the "short division" algorithm - // and the the Knuth "classical algorithm" which requires there to be native - // operations for +, -, and * on an m bit value with an m*2 bit result. We - // can't use 64-bit operands here because we don't have native results of - // 128-bits. Furthremore, casting the 64-bit values to 32-bit values won't - // work on large-endian machines. - uint64_t mask = ~0ull >> (sizeof(uint32_t) * 8); - uint32_t n = 2; - uint32_t m = (lhsWords * 2) - n; - - // Allocate space for the temporary values we need either on the stack, if - // it will fit, or on the heap if it won't. - uint32_t SPACE[128]; - uint32_t* __U = 0; - uint32_t* __V = 0; - uint32_t* __Q = 0; - uint32_t* __R = 0; - if ((Remainder ? 
4 : 3) * n + 2 * m + 1 <= 128) { - __U = &SPACE[0]; - __V = &SPACE[m + n + 1]; - __Q = &SPACE[(m + n + 1) + n]; - if (Remainder) __R = &SPACE[(m + n + 1) + n + (m + n)]; - } else { - __U = new uint32_t[m + n + 1]; - __V = new uint32_t[n]; - __Q = new uint32_t[m + n]; - if (Remainder) __R = new uint32_t[n]; - } - - // Initialize the dividend - memset(__U, 0, (m + n + 1) * sizeof(uint32_t)); - for (unsigned i = 0; i < lhsWords; ++i) { - uint64_t tmp = LHS.get_pVal(i); - __U[i * 2] = tmp & mask; - __U[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); - } - __U[m + n] = 0; // this extra word is for "spill" in the Knuth algorithm. - - // Initialize the divisor - memset(__V, 0, (n) * sizeof(uint32_t)); - __V[0] = RHS & mask; - __V[1] = (RHS) >> (sizeof(uint32_t) * 8); - - // initialize the quotient and remainder - memset(__Q, 0, (m + n) * sizeof(uint32_t)); - if (Remainder) memset(__R, 0, n * sizeof(uint32_t)); - - // Now, adjust m and n for the Knuth division. n is the number of words in - // the divisor. m is the number of words by which the dividend exceeds the - // divisor (i.e. m+n is the length of the dividend). These sizes must not - // contain any zero words or the Knuth algorithm fails. - for (unsigned i = n; i > 0 && __V[i - 1] == 0; i--) { - n--; - m++; - } - for (unsigned i = m + n; i > 0 && __U[i - 1] == 0; i--) m--; - - // If we're left with only a single word for the divisor, Knuth doesn't work - // so we implement the short division algorithm here. This is much simpler - // and faster because we are certain that we can divide a 64-bit quantity - // by a 32-bit quantity at hardware speed and short division is simply a - // series of such operations. This is just like doing short division but we - // are using base 2^32 instead of base 10. 
- assert(n != 0 && "Divide by zero?"); - if (n == 1) { - uint32_t divisor = __V[0]; - uint32_t remainder = 0; - for (int i = m + n - 1; i >= 0; i--) { - uint64_t partial_dividend = (uint64_t(remainder)) << 32 | __U[i]; - if (partial_dividend == 0) { - __Q[i] = 0; - remainder = 0; - } else if (partial_dividend < divisor) { - __Q[i] = 0; - remainder = partial_dividend; - } else if (partial_dividend == divisor) { - __Q[i] = 1; - remainder = 0; - } else { - __Q[i] = partial_dividend / divisor; - remainder = partial_dividend - (__Q[i] * divisor); - } - } - if (__R) __R[0] = remainder; - } else { - // Now we're ready to invoke the Knuth classical divide algorithm. In this - // case n > 1. - KnuthDiv(__U, __V, __Q, __R, m, n); - } - - // If the caller wants the quotient - if (Quotient) { - // Set up the Quotient value's memory. - if (Quotient->BitWidth != LHS.BitWidth) { - if (Quotient->isSingleWord()) Quotient->set_VAL(0); - } else - Quotient->clear(); - - // The quotient is in Q. Reconstitute the quotient into Quotient's low - // order words. - if (lhsWords == 1) { - uint64_t tmp = - uint64_t(__Q[0]) | ((uint64_t(__Q[1])) << (APINT_BITS_PER_WORD / 2)); - Quotient->set_VAL(tmp); - } else { - assert(!Quotient->isSingleWord() && - "Quotient ap_private not large enough"); - for (unsigned i = 0; i < lhsWords; ++i) - Quotient->set_pVal( - i, uint64_t(__Q[i * 2]) | - ((uint64_t(__Q[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Quotient->clearUnusedBits(); - } - - // If the caller wants the remainder - if (Remainder) { - // Set up the Remainder value's memory. - if (Remainder->BitWidth != 64 /* RHS.BitWidth */) { - if (Remainder->isSingleWord()) Remainder->set_VAL(0); - } else - Remainder->clear(); - - // The remainder is in __R. Reconstitute the remainder into Remainder's low - // order words. 
- if (rhsWords == 1) { - uint64_t tmp = - uint64_t(__R[0]) | ((uint64_t(__R[1])) << (APINT_BITS_PER_WORD / 2)); - Remainder->set_VAL(tmp); - } else { - assert(!Remainder->isSingleWord() && - "Remainder ap_private not large enough"); - for (unsigned i = 0; i < rhsWords; ++i) - Remainder->set_pVal( - i, uint64_t(__R[i * 2]) | - ((uint64_t(__R[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Remainder->clearUnusedBits(); - } - - // Clean up the memory we allocated. - if (__U != &SPACE[0]) { - delete[] __U; - delete[] __V; - delete[] __Q; - delete[] __R; - } -} - -/// @brief Logical right-shift function. -template -INLINE ap_private<_AP_W, _AP_S, _AP_C> lshr( - const ap_private<_AP_W, _AP_S, _AP_C>& LHS, uint32_t shiftAmt) { - return LHS.lshr(shiftAmt); -} - -/// Left-shift the ap_private by shiftAmt. -/// @brief Left-shift function. -template -INLINE ap_private<_AP_W, _AP_S, _AP_C> shl( - const ap_private<_AP_W, _AP_S, _AP_C>& LHS, uint32_t shiftAmt) { - return LHS.shl(shiftAmt); -} - -} // namespace ap_private_ops - -#endif // LLVM_SUPPORT_MATHEXTRAS_H - -/// This enumeration just provides for internal constants used in this -/// translation unit. -enum { - MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified - ///< Note that this must remain synchronized with IntegerType::MIN_INT_BITS - MAX_INT_BITS = (1 << 23) - 1 ///< Maximum number of bits that can be specified - ///< Note that this must remain synchronized with IntegerType::MAX_INT_BITS -}; - -//===----------------------------------------------------------------------===// -// ap_private Class -//===----------------------------------------------------------------------===// - -/// ap_private - This class represents arbitrary precision constant integral -/// values. 
-/// It is a functional replacement for common case unsigned integer type like -/// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width -/// integer sizes and large integer value types such as 3-bits, 15-bits, or more -/// than 64-bits of precision. ap_private provides a variety of arithmetic -/// operators -/// and methods to manipulate integer values of any bit-width. It supports both -/// the typical integer arithmetic and comparison operations as well as bitwise -/// manipulation. -/// -/// The class has several invariants worth noting: -/// * All bit, byte, and word positions are zero-based. -/// * Once the bit width is set, it doesn't change except by the Truncate, -/// SignExtend, or ZeroExtend operations. -/// * All binary operators must be on ap_private instances of the same bit -/// width. -/// Attempting to use these operators on instances with different bit -/// widths will yield an assertion. -/// * The value is stored canonically as an unsigned value. For operations -/// where it makes a difference, there are both signed and unsigned variants -/// of the operation. For example, sdiv and udiv. However, because the bit -/// widths must be the same, operations such as Mul and Add produce the same -/// results regardless of whether the values are interpreted as signed or -/// not. -/// * In general, the class tries to follow the style of computation that LLVM -/// uses in its IR. This simplifies its use for LLVM. -/// -/// @brief Class for arbitrary precision integers. 
- -#if defined(_MSC_VER) -#if _MSC_VER < 1400 && !defined(for) -#define for if (0); else for -#endif -typedef unsigned __int64 ap_ulong; -typedef signed __int64 ap_slong; -#else -typedef unsigned long long ap_ulong; -typedef signed long long ap_slong; -#endif -template -struct valtype; - -template -struct valtype<_AP_N8, false> { - typedef uint64_t Type; -}; - -template -struct valtype<_AP_N8, true> { - typedef int64_t Type; -}; - -template <> -struct valtype<1, false> { - typedef unsigned char Type; -}; -template <> -struct valtype<2, false> { - typedef unsigned short Type; -}; -template <> -struct valtype<3, false> { - typedef unsigned int Type; -}; -template <> -struct valtype<4, false> { - typedef unsigned int Type; -}; -template <> -struct valtype<1, true> { - typedef signed char Type; -}; -template <> -struct valtype<2, true> { - typedef short Type; -}; -template <> -struct valtype<3, true> { - typedef int Type; -}; -template <> -struct valtype<4, true> { - typedef int Type; -}; - -template -struct ap_private_enable_if {}; -template <> -struct ap_private_enable_if { - static const bool isValid = true; -}; - -// When bitwidth < 64 -template -class ap_private<_AP_W, _AP_S, true> { - // SFINAE pattern. 
Only consider this class when _AP_W <= 64 - const static bool valid = ap_private_enable_if<_AP_W <= 64>::isValid; - -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - public: - typedef typename valtype<(_AP_W + 7) / 8, _AP_S>::Type ValType; - typedef ap_private<_AP_W, _AP_S> Type; - template - struct RType { - enum { - mult_w = _AP_W + _AP_W2, - mult_s = _AP_S || _AP_S2, - plus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, - div_w = _AP_W + _AP_S2, - div_s = _AP_S || _AP_S2, - mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), - mod_s = _AP_S, - logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - typedef ap_private mult; - typedef ap_private plus; - typedef ap_private minus; - typedef ap_private logic; - typedef ap_private div; - typedef ap_private mod; - typedef ap_private<_AP_W, _AP_S> arg1; - typedef bool reduce; - }; - enum { APINT_BITS_PER_WORD = sizeof(uint64_t) * 8 }; - enum { - excess_bits = (_AP_W % APINT_BITS_PER_WORD) - ? APINT_BITS_PER_WORD - (_AP_W % APINT_BITS_PER_WORD) - : 0 - }; - static const uint64_t mask = ((uint64_t)~0ULL >> (excess_bits)); - static const uint64_t not_mask = ~mask; - static const uint64_t sign_bit_mask = 1ULL << (APINT_BITS_PER_WORD - 1); - template - struct sign_ext_mask { - static const uint64_t mask = ~0ULL << _AP_W1; - }; - static const int width = _AP_W; - - enum { - BitWidth = _AP_W, - _AP_N = 1, - }; - ValType VAL; ///< Used to store the <= 64 bits integer value. 
-#ifdef AP_CANARY - ValType CANARY; - void check_canary() { assert(CANARY == (ValType)0xDEADBEEFDEADBEEF); } - void set_canary() { CANARY = (ValType)0xDEADBEEFDEADBEEF; } -#else - void check_canary() {} - void set_canary() {} -#endif - - INLINE ValType& get_VAL(void) { return VAL; } - INLINE ValType get_VAL(void) const { return VAL; } - INLINE ValType get_VAL(void) const volatile { return VAL; } - INLINE void set_VAL(uint64_t value) { VAL = (ValType)value; } - INLINE ValType& get_pVal(int i) { return VAL; } - INLINE ValType get_pVal(int i) const { return VAL; } - INLINE const uint64_t* get_pVal() const { - assert(0 && "invalid usage"); - return 0; - } - INLINE ValType get_pVal(int i) const volatile { return VAL; } - INLINE uint64_t* get_pVal() const volatile { - assert(0 && "invalid usage"); - return 0; - } - INLINE void set_pVal(int i, uint64_t value) { VAL = (ValType)value; } - - INLINE uint32_t getBitWidth() const { return BitWidth; } - - template - ap_private<_AP_W, _AP_S>& operator=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - ap_private<_AP_W, _AP_S>& operator=( - const volatile ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(RHS.get_VAL()); // TODO check here about ap_private - clearUnusedBits(); - return *this; - } - - void operator=(const ap_private& RHS) volatile { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - } - - ap_private& operator=(const ap_private& RHS) { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - return *this; - } - - void operator=(const volatile ap_private& RHS) volatile { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. 
- clearUnusedBits(); - } - - ap_private& operator=(const volatile ap_private& RHS) { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - *this = ap_private<_AP_W2, false>(op2); - return *this; - } - -#define ASSIGN_OP_FROM_INT(C_TYPE) \ - INLINE ap_private& operator=(const C_TYPE v) { \ - set_canary(); \ - this->VAL = (ValType)v; \ - clearUnusedBits(); \ - check_canary(); \ - return *this; \ - } - -ASSIGN_OP_FROM_INT(bool) -ASSIGN_OP_FROM_INT(char) -ASSIGN_OP_FROM_INT(signed char) -ASSIGN_OP_FROM_INT(unsigned char) -ASSIGN_OP_FROM_INT(short) -ASSIGN_OP_FROM_INT(unsigned short) -ASSIGN_OP_FROM_INT(int) -ASSIGN_OP_FROM_INT(unsigned int) -ASSIGN_OP_FROM_INT(long) -ASSIGN_OP_FROM_INT(unsigned long) -ASSIGN_OP_FROM_INT(ap_slong) -ASSIGN_OP_FROM_INT(ap_ulong) -#if 0 -ASSIGN_OP_FROM_INT(half) -ASSIGN_OP_FROM_INT(float) -ASSIGN_OP_FROM_INT(double) -#endif -#undef ASSIGN_OP_FROM_INT - - // XXX This is a must to prevent pointer being converted to bool. - INLINE ap_private& operator=(const char* s) { - ap_private tmp(s); // XXX direct-initialization, as ctor is explicit. 
- operator=(tmp); - return *this; - } - - private: - explicit INLINE ap_private(uint64_t* val) : VAL(val[0]) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - INLINE bool isSingleWord() const { return true; } - - public: - INLINE void fromString(const char* strStart, uint32_t slen, uint8_t radix) { - bool isNeg = strStart[0] == '-'; - if (isNeg) { - strStart++; - slen--; - } - - if (strStart[0] == '0' && (strStart[1] == 'b' || strStart[1] == 'B')) { - //if(radix == 0) radix = 2; - _AP_WARNING(radix != 2, "%s seems to have base %d, but %d given.", strStart, 2, radix); - strStart += 2; - slen -=2; - } else if (strStart[0] == '0' && (strStart[1] == 'o' || strStart[1] == 'O')) { - //if (radix == 0) radix = 8; - _AP_WARNING(radix != 8, "%s seems to have base %d, but %d given.", strStart, 8, radix); - strStart += 2; - slen -=2; - } else if (strStart[0] == '0' && (strStart[1] == 'x' || strStart[1] == 'X')) { - //if (radix == 0) radix = 16; - _AP_WARNING(radix != 16, "%s seems to have base %d, but %d given.", strStart, 16, radix); - strStart += 2; - slen -=2; - } else if (strStart[0] == '0' && (strStart[1] == 'd' || strStart[1] == 'D')) { - //if (radix == 0) radix = 10; - _AP_WARNING(radix != 10, "%s seems to have base %d, but %d given.", strStart, 10, radix); - strStart += 2; - slen -=2; - } else if (radix == 0) { - //radix = 2; // XXX default value - } - - // Check our assumptions here - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - assert(strStart && "String is null?"); - - // Clear bits. - uint64_t tmpVAL = VAL = 0; - - switch (radix) { - case 2: - // sscanf(strStart,"%b",&VAL); - // tmpVAL = *strStart =='1' ? 
~0ULL : 0; - for (; *strStart; ++strStart) { - assert((*strStart == '0' || *strStart == '1') && - ("Wrong binary number")); - tmpVAL <<= 1; - tmpVAL |= (*strStart - '0'); - } - break; - case 8: -#ifdef _MSC_VER - sscanf_s(strStart, "%llo", &tmpVAL, slen + 1); -#else -#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) - sscanf(strStart, "%lo", &tmpVAL); -#else - sscanf(strStart, "%llo", &tmpVAL); -#endif //__x86_64__ -#endif //_MSC_VER - break; - case 10: -#ifdef _MSC_VER - sscanf_s(strStart, "%llu", &tmpVAL, slen + 1); -#else -#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) - sscanf(strStart, "%lu", &tmpVAL); -#else - sscanf(strStart, "%llu", &tmpVAL); -#endif //__x86_64__ -#endif //_MSC_VER - break; - case 16: -#ifdef _MSC_VER - sscanf_s(strStart, "%llx", &tmpVAL, slen + 1); -#else -#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) - sscanf(strStart, "%lx", &tmpVAL); -#else - sscanf(strStart, "%llx", &tmpVAL); -#endif //__x86_64__ -#endif //_MSC_VER - break; - default: - assert(true && "Unknown radix"); - // error - } - VAL = isNeg ? 
(ValType)(-tmpVAL) : (ValType)(tmpVAL); - - clearUnusedBits(); - } - - private: - INLINE ap_private(const std::string& val, uint8_t radix = 2) : VAL(0) { - assert(!val.empty() && "String empty?"); - set_canary(); - fromString(val.c_str(), val.size(), radix); - check_canary(); - } - - INLINE ap_private(const char strStart[], uint32_t slen, uint8_t radix) - : VAL(0) { - set_canary(); - fromString(strStart, slen, radix); - check_canary(); - } - - INLINE ap_private(uint32_t numWords, const uint64_t bigVal[]) - : VAL(bigVal[0]) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - public: - INLINE ap_private() { - set_canary(); - clearUnusedBits(); - check_canary(); - } - -#define CTOR(TYPE) \ - INLINE ap_private(TYPE v) : VAL((ValType)v) { \ - set_canary(); \ - clearUnusedBits(); \ - check_canary(); \ - } - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#if 0 - CTOR(half) - CTOR(float) - CTOR(double) -#endif -#undef CTOR - - template - INLINE ap_private(const ap_private<_AP_W1, _AP_S1, _AP_OPT>& that) - : VAL((ValType)that.get_VAL()) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - template - INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, _AP_OPT>& that) - : VAL((ValType)that.get_VAL()) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - explicit INLINE ap_private(const char* val) { - set_canary(); - unsigned char radix = 10; - std::string str = ap_private_ops::parseString(val, radix); // will set radix. 
- std::string::size_type pos = str.find('.'); - // trunc all fraction part - if (pos != std::string::npos) str = str.substr(pos); - - ap_private<_AP_W, _AP_S> ap_private_val(str, radix); - operator=(ap_private_val); - check_canary(); - } - - INLINE ap_private(const char* val, signed char rd) { - set_canary(); - unsigned char radix = rd; - std::string str = ap_private_ops::parseString(val, radix); // will set radix. - std::string::size_type pos = str.find('.'); - // trunc all fraction part - if (pos != std::string::npos) str = str.substr(pos); - - ap_private<_AP_W, _AP_S> ap_private_val(str, radix); - operator=(ap_private_val); - check_canary(); - } - - INLINE ~ap_private() { check_canary(); } - - INLINE bool isNegative() const { - static const uint64_t sign_mask = 1ULL << (_AP_W - 1); - return _AP_S && (sign_mask & VAL); - } - - INLINE bool isPositive() const { return !isNegative(); } - - INLINE bool isStrictlyPositive() const { return !isNegative() && VAL != 0; } - - INLINE bool isAllOnesValue() const { return (mask & VAL) == mask; } - - INLINE bool operator==(const ap_private<_AP_W, _AP_S>& RHS) const { - return VAL == RHS.get_VAL(); - } - INLINE bool operator==(const ap_private<_AP_W, !_AP_S>& RHS) const { - return (uint64_t)VAL == (uint64_t)RHS.get_VAL(); - } - - INLINE bool operator==(uint64_t Val) const { return ((uint64_t)VAL == Val); } - INLINE bool operator!=(uint64_t Val) const { return ((uint64_t)VAL != Val); } - INLINE bool operator!=(const ap_private<_AP_W, _AP_S>& RHS) const { - return VAL != RHS.get_VAL(); - } - INLINE bool operator!=(const ap_private<_AP_W, !_AP_S>& RHS) const { - return (uint64_t)VAL != (uint64_t)RHS.get_VAL(); - } - - /// postfix increment. - const ap_private operator++(int) { - ap_private orig(*this); - VAL++; - clearUnusedBits(); - return orig; - } - - /// prefix increment. - const ap_private operator++() { - ++VAL; - clearUnusedBits(); - return *this; - } - - /// postfix decrement. 
- const ap_private operator--(int) { - ap_private orig(*this); - --VAL; - clearUnusedBits(); - return orig; - } - - /// prefix decrement. - const ap_private operator--() { - --VAL; - clearUnusedBits(); - return *this; - } - - /// one's complement. - INLINE ap_private<_AP_W + !_AP_S, true> operator~() const { - ap_private<_AP_W + !_AP_S, true> Result(*this); - Result.flip(); - return Result; - } - - /// two's complement. - INLINE typename RType<1, false>::minus operator-() const { - return ap_private<1, false>(0) - (*this); - } - - /// logic negation. - INLINE bool operator!() const { return !VAL; } - - INLINE std::string toString(uint8_t radix, bool wantSigned) const; - INLINE std::string toStringUnsigned(uint8_t radix = 10) const { - return toString(radix, false); - } - INLINE std::string toStringSigned(uint8_t radix = 10) const { - return toString(radix, true); - } - INLINE void clear() { VAL = 0; } - INLINE ap_private& clear(uint32_t bitPosition) { - VAL &= ~(1ULL << (bitPosition)); - clearUnusedBits(); - return *this; - } - - INLINE ap_private ashr(uint32_t shiftAmt) const { - if (_AP_S) - return ap_private((shiftAmt == BitWidth) ? 0 - : ((int64_t)VAL) >> (shiftAmt)); - else - return ap_private((shiftAmt == BitWidth) ? 0 - : ((uint64_t)VAL) >> (shiftAmt)); - } - - INLINE ap_private lshr(uint32_t shiftAmt) const { - return ap_private((shiftAmt == BitWidth) - ? ap_private(0) - : ap_private((VAL & mask) >> (shiftAmt))); - } - - INLINE ap_private shl(uint32_t shiftAmt) const -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - if (shiftAmt > BitWidth) { - if (!isNegative()) - return ap_private(0); - else - return ap_private(-1); - } - if (shiftAmt == BitWidth) - return ap_private(0); - else - return ap_private((VAL) << (shiftAmt)); - // return ap_private((shiftAmt == BitWidth) ? 
ap_private(0ULL) : - // ap_private(VAL << shiftAmt)); - } - - INLINE int64_t getSExtValue() const { return VAL; } - - // XXX XXX this function is used in CBE - INLINE uint64_t getZExtValue() const { return VAL & mask; } - - template - INLINE ap_private(const _private_range_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ref.get(); - check_canary(); - } - - template - INLINE ap_private(const _private_bit_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ((uint64_t)(bool)ref); - check_canary(); - } - -// template -// INLINE ap_private(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { -// set_canary(); -// *this = ref.get(); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = ((val.operator ap_private<_AP_W2, false>())); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = (uint64_t)(bool)val; -// check_canary(); -// } - - INLINE void write(const ap_private<_AP_W, _AP_S>& op2) volatile { - *this = (op2); - } - - // Explicit conversions to C interger types - //----------------------------------------------------------- - INLINE operator ValType() const { return get_VAL(); } - - INLINE int to_uchar() const { return (unsigned char)get_VAL(); } - - INLINE int to_char() const { return (signed char)get_VAL(); } - - INLINE int to_ushort() const { return (unsigned short)get_VAL(); } - - INLINE int to_short() const { return (short)get_VAL(); } - - INLINE int to_int() const { - // ap_private<64 /* _AP_W */, _AP_S> res(V); - return (int)get_VAL(); - } - - INLINE unsigned to_uint() const { return (unsigned)get_VAL(); } - - INLINE long to_long() const { return (long)get_VAL(); } - - INLINE unsigned long to_ulong() const { return (unsigned long)get_VAL(); } - - INLINE ap_slong to_int64() const { return (ap_slong)get_VAL(); 
} - - INLINE ap_ulong to_uint64() const { return (ap_ulong)get_VAL(); } - - INLINE double to_double() const { - if (isNegative()) - return roundToDouble(true); - else - return roundToDouble(false); - } - - INLINE unsigned length() const { return _AP_W; } - - INLINE bool isMinValue() const { return VAL == 0; } - template - INLINE ap_private& operator&=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) & RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator|=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) | RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator^=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) ^ RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator*=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) * RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator+=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) + RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator-=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) - RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::logic operator&( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { - typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) & - RHS.get_VAL()); - return Ret; - } else { - typename RType<_AP_W1, _AP_S1>::logic Ret = *this; - return Ret & RHS; - } - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::logic operator^( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { - typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) ^ - RHS.get_VAL()); - return Ret; - } 
else { - typename RType<_AP_W1, _AP_S1>::logic Ret = *this; - return Ret ^ RHS; - } - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::logic operator|( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { - typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) | - RHS.get_VAL()); - return Ret; - } else { - typename RType<_AP_W1, _AP_S1>::logic Ret = *this; - return Ret | RHS; - } - } - - INLINE ap_private And(const ap_private& RHS) const { - return ap_private(VAL & RHS.get_VAL()); - } - - INLINE ap_private Or(const ap_private& RHS) const { - return ap_private(VAL | RHS.get_VAL()); - } - - INLINE ap_private Xor(const ap_private& RHS) const { - return ap_private(VAL ^ RHS.get_VAL()); - } -#if 1 - template - INLINE typename RType<_AP_W1, _AP_S1>::mult operator*( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::mult_w <= 64) { - typename RType<_AP_W1, _AP_S1>::mult Result(((uint64_t)VAL) * - RHS.get_VAL()); - return Result; - } else { - typename RType<_AP_W1, _AP_S1>::mult Result(*this); - Result *= RHS; - return Result; - } - } -#endif - INLINE ap_private Mul(const ap_private& RHS) const { - return ap_private(VAL * RHS.get_VAL()); - } - - INLINE ap_private Add(const ap_private& RHS) const { - return ap_private(VAL + RHS.get_VAL()); - } - - INLINE ap_private Sub(const ap_private& RHS) const { - return ap_private(VAL - RHS.get_VAL()); - } - - INLINE ap_private& operator&=(uint64_t RHS) { - VAL &= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator|=(uint64_t RHS) { - VAL |= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator^=(uint64_t RHS) { - VAL ^= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator*=(uint64_t RHS) { - VAL *= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator+=(uint64_t RHS) { - VAL += (ValType)RHS; - clearUnusedBits(); - return *this; - } 
- INLINE ap_private& operator-=(uint64_t RHS) { - VAL -= (ValType)RHS; - clearUnusedBits(); - return *this; - } - - INLINE bool isMinSignedValue() const { - static const uint64_t min_mask = ~(~0ULL << (_AP_W - 1)); - return BitWidth == 1 ? VAL == 1 - : (ap_private_ops::isNegative<_AP_W>(*this) && - ((min_mask & VAL) == 0)); - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::plus operator+( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::plus_w <= 64) - return typename RType<_AP_W1, _AP_S1>::plus( - RType<_AP_W1, _AP_S1>::plus_s - ? int64_t(((uint64_t)VAL) + RHS.get_VAL()) - : uint64_t(((uint64_t)VAL) + RHS.get_VAL())); - typename RType<_AP_W1, _AP_S1>::plus Result = RHS; - Result += VAL; - return Result; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::minus operator-( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::minus_w <= 64) - return typename RType<_AP_W1, _AP_S1>::minus( - int64_t(((uint64_t)VAL) - RHS.get_VAL())); - typename RType<_AP_W1, _AP_S1>::minus Result = *this; - Result -= RHS; - return Result; - } - - INLINE uint32_t countPopulation() const { - return ap_private_ops::CountPopulation_64(VAL); - } - INLINE uint32_t countLeadingZeros() const { - int remainder = BitWidth % 64; - int excessBits = (64 - remainder) % 64; - uint32_t Count = ap_private_ops::CountLeadingZeros_64(VAL); - if (Count) Count -= excessBits; - return AESL_std::min(Count, (uint32_t)_AP_W); - } - - /// HiBits - This function returns the high "numBits" bits of this ap_private. - INLINE ap_private<_AP_W, _AP_S> getHiBits(uint32_t numBits) const { - ap_private<_AP_W, _AP_S> ret(*this); - ret = (ret) >> (BitWidth - numBits); - return ret; - } - - /// LoBits - This function returns the low "numBits" bits of this ap_private. 
- INLINE ap_private<_AP_W, _AP_S> getLoBits(uint32_t numBits) const { - ap_private<_AP_W, _AP_S> ret(((uint64_t)VAL) << (BitWidth - numBits)); - ret = (ret) >> (BitWidth - numBits); - return ret; - // return ap_private(numBits, (VAL << (BitWidth - numBits))>> (BitWidth - - // numBits)); - } - - INLINE ap_private<_AP_W, _AP_S>& set(uint32_t bitPosition) { - VAL |= (1ULL << (bitPosition)); - clearUnusedBits(); - return *this; // clearUnusedBits(); - } - - INLINE void set() { - VAL = (ValType)~0ULL; - clearUnusedBits(); - } - - template - INLINE void set(const ap_private<_AP_W3, false>& val) { - operator=(ap_private<_AP_W3, _AP_S>(val)); - } - - INLINE void set(const ap_private& val) { operator=(val); } - - INLINE void clearUnusedBits(void) volatile -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - enum { excess_bits = (_AP_W % 64) ? 64 - _AP_W % 64 : 0 }; - VAL = (ValType)( - _AP_S - ? ((((int64_t)VAL) << (excess_bits)) >> (excess_bits)) - : (excess_bits ? (((uint64_t)VAL) << (excess_bits)) >> (excess_bits) - : (uint64_t)VAL)); - } - - INLINE void clearUnusedBitsToZero(void) { - enum { excess_bits = (_AP_W % 64) ? 64 - _AP_W % 64 : 0 }; - static uint64_t mask = ~0ULL >> (excess_bits); - VAL &= mask; - } - - INLINE ap_private udiv(const ap_private& RHS) const { - return ap_private((uint64_t)VAL / RHS.get_VAL()); - } - - /// Signed divide this ap_private by ap_private RHS. - /// @brief Signed division function for ap_private. 
- INLINE ap_private sdiv(const ap_private& RHS) const { - if (isNegative()) - if (RHS.isNegative()) - return ((uint64_t)(0 - (*this))) / (uint64_t)(0 - RHS); - else - return 0 - ((uint64_t)(0 - (*this)) / (uint64_t)(RHS)); - else if (RHS.isNegative()) - return 0 - (this->udiv((ap_private)(0 - RHS))); - return this->udiv(RHS); - } - - template - INLINE ap_private urem(const ap_private<_AP_W, _AP_S2>& RHS) const { - assert(RHS.get_VAL() != 0 && "Divide by 0"); - return ap_private(((uint64_t)VAL) % ((uint64_t)RHS.get_VAL())); - } - - /// Signed remainder operation on ap_private. - /// @brief Function for signed remainder operation. - template - INLINE ap_private srem(const ap_private<_AP_W, _AP_S2>& RHS) const { - if (isNegative()) { - ap_private lhs = 0 - (*this); - if (RHS.isNegative()) { - ap_private rhs = 0 - RHS; - return 0 - (lhs.urem(rhs)); - } else - return 0 - (lhs.urem(RHS)); - } else if (RHS.isNegative()) { - ap_private rhs = 0 - RHS; - return this->urem(rhs); - } - return this->urem(RHS); - } - - template - INLINE bool eq(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return (*this) == RHS; - } - - template - INLINE bool ne(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !((*this) == RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the less-than relationship. - /// @returns true if *this < RHS when both are considered unsigned. - /// @brief Unsigned less than comparison - template - INLINE bool ult(const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (_AP_W1 <= 64) { - uint64_t lhsZext = ((uint64_t(VAL)) << (64 - _AP_W)) >> (64 - _AP_W); - uint64_t rhsZext = - ((uint64_t(RHS.get_VAL())) << (64 - _AP_W1)) >> (64 - _AP_W1); - return lhsZext < rhsZext; - } else - return RHS.uge(*this); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the less-than relationship. - /// @returns true if *this < RHS when both are considered signed. 
- /// @brief Signed less than comparison - template - INLINE bool slt(const ap_private<_AP_W1, _AP_S1>& RHS) const -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - if (_AP_W1 <= 64) { - int64_t lhsSext = ((int64_t(VAL)) << (64 - _AP_W)) >> (64 - _AP_W); - int64_t rhsSext = - ((int64_t(RHS.get_VAL())) << (64 - _AP_W1)) >> (64 - _AP_W1); - return lhsSext < rhsSext; - } else - return RHS.sge(*this); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered unsigned. - /// @brief Unsigned less or equal comparison - template - INLINE bool ule(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return ult(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered signed. - /// @brief Signed less or equal comparison - template - INLINE bool sle(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return slt(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered unsigned. - /// @brief Unsigned greather than comparison - template - INLINE bool ugt(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !ult(RHS) && !eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered signed. 
- /// @brief Signed greather than comparison - template - INLINE bool sgt(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !slt(RHS) && !eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the greater-or-equal relationship. - /// @returns true if *this >= RHS when both are considered unsigned. - /// @brief Unsigned greater or equal comparison - template - INLINE bool uge(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !ult(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the greater-or-equal relationship. - /// @returns true if *this >= RHS when both are considered signed. - /// @brief Signed greather or equal comparison - template - INLINE bool sge(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !slt(RHS); - } - - INLINE ap_private abs() const { - if (isNegative()) return -(*this); - return *this; - } - - INLINE ap_private<_AP_W, false> get() const { - ap_private<_AP_W, false> ret(*this); - return ret; - } - - INLINE static uint32_t getBitsNeeded(const char* str, uint32_t slen, - uint8_t radix) { - return _AP_W; - } - - INLINE uint32_t getActiveBits() const { - uint32_t bits = _AP_W - countLeadingZeros(); - return bits ? bits : 1; - } - - INLINE double roundToDouble(bool isSigned = false) const { - return isSigned ? double((int64_t)VAL) : double((uint64_t)VAL); - } - - /*Reverse the contents of ap_private instance. I.e. 
LSB becomes MSB and vise - * versa*/ - INLINE ap_private& reverse() { - for (int i = 0; i < _AP_W / 2; ++i) { - bool tmp = operator[](i); - if (operator[](_AP_W - 1 - i)) - set(i); - else - clear(i); - if (tmp) - set(_AP_W - 1 - i); - else - clear(_AP_W - 1 - i); - } - clearUnusedBits(); - return *this; - } - - /*Return true if the value of ap_private instance is zero*/ - INLINE bool iszero() const { return isMinValue(); } - - INLINE bool to_bool() const { return !iszero(); } - - /* x < 0 */ - INLINE bool sign() const { - if (isNegative()) return true; - return false; - } - - /* x[i] = !x[i] */ - INLINE void invert(int i) { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - flip(i); - } - - /* x[i] */ - INLINE bool test(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return operator[](i); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the left - INLINE void lrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(shl(n) | lshr(_AP_W - n)); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the right - INLINE void rrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(lshr(n) | shl(_AP_W - n)); - } - - // Set the ith bit into v - INLINE void set(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? 
set(i) : clear(i); - } - - // Set the ith bit into v - INLINE void set_bit(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // Get the value of ith bit - INLINE bool get_bit(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return (((1ULL << i) & VAL) != 0); - } - - /// Toggle all bits. - INLINE ap_private& flip() { - VAL = (ValType)((~0ULL ^ VAL) & mask); - clearUnusedBits(); - return *this; - } - - /// Toggles a given bit to its opposite value. - INLINE ap_private& flip(uint32_t bitPosition) { - assert(bitPosition < BitWidth && "Out of the bit-width range!"); - set_bit(bitPosition, !get_bit(bitPosition)); - return *this; - } - - // complements every bit - INLINE void b_not() { flip(); } - -// Binary Arithmetic -//----------------------------------------------------------- -#define OP_BIN_AP(Sym, Rty, Fun) \ - template \ - INLINE typename RType<_AP_W2, _AP_S2>::Rty operator Sym( \ - const ap_private<_AP_W2, _AP_S2>& op) const { \ - typename RType<_AP_W2, _AP_S2>::Rty lhs(*this); \ - typename RType<_AP_W2, _AP_S2>::Rty rhs(op); \ - return lhs.Fun(rhs); \ - } - -/// Bitwise and, or, xor -// OP_BIN_AP(&,logic, And) -// OP_BIN_AP(|,logic, Or) -// OP_BIN_AP(^,logic, Xor) -#undef OP_BIN_AP - - template - INLINE typename RType<_AP_W2, _AP_S2>::div operator/( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - return typename RType<_AP_W2, _AP_S2>::div( - (_AP_S || _AP_S2) ? lhs.sdiv(rhs) : lhs.udiv(rhs)); - } - - template - INLINE typename RType<_AP_W2, _AP_S2>::mod operator%( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? 
_AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - typename RType<_AP_W2, _AP_S2>::mod res = - typename RType<_AP_W2, _AP_S2>::mod(_AP_S ? lhs.srem(rhs) - : lhs.urem(rhs)); - return res; - } - -#define OP_ASSIGN_AP_2(Sym) \ - template \ - INLINE ap_private<_AP_W, _AP_S>& operator Sym##=( \ - const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - return *this; \ - } - - OP_ASSIGN_AP_2(/) - OP_ASSIGN_AP_2(%) -#undef OP_ASSIGN_AP_2 - -/// Bitwise assign: and, or, xor -//------------------------------------------------------------- -// OP_ASSIGN_AP(&) -// OP_ASSIGN_AP(^) -// OP_ASSIGN_AP(|) - -#define OP_LEFT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator<<(const TYPE op) const { \ - if (op >= _AP_W) return ap_private(0); \ - if (SIGNED && op < 0) return *this >> (0 - op); \ - return shl(op); \ - } - - // OP_LEFT_SHIFT_CTYPE(bool, false) - OP_LEFT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) - OP_LEFT_SHIFT_CTYPE(signed char, true) - OP_LEFT_SHIFT_CTYPE(unsigned char, false) - OP_LEFT_SHIFT_CTYPE(short, true) - OP_LEFT_SHIFT_CTYPE(unsigned short, false) - OP_LEFT_SHIFT_CTYPE(int, true) - OP_LEFT_SHIFT_CTYPE(unsigned int, false) - OP_LEFT_SHIFT_CTYPE(long, true) - OP_LEFT_SHIFT_CTYPE(unsigned long, false) - OP_LEFT_SHIFT_CTYPE(long long, true) - OP_LEFT_SHIFT_CTYPE(unsigned long long, false) -#if 0 - OP_LEFT_SHIFT_CTYPE(half, false) - OP_LEFT_SHIFT_CTYPE(float, false) - OP_LEFT_SHIFT_CTYPE(double, false) -#endif - -#undef OP_LEFT_SHIFT_CTYPE - - template - INLINE ap_private operator<<(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this << sh; - } else { - int sh = op2.to_int(); - return *this << sh; - } - } - -#define OP_RIGHT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator>>(const TYPE op) const { \ - if (op >= _AP_W) { \ - if (isNegative()) \ - return ap_private(-1); \ - else \ - return 
ap_private(0); \ - } \ - if ((SIGNED) && op < 0) return *this << (0 - op); \ - if (_AP_S) \ - return ashr(op); \ - else \ - return lshr(op); \ - } - - // OP_RIGHT_SHIFT_CTYPE(bool, false) - OP_RIGHT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) - OP_RIGHT_SHIFT_CTYPE(signed char, true) - OP_RIGHT_SHIFT_CTYPE(unsigned char, false) - OP_RIGHT_SHIFT_CTYPE(short, true) - OP_RIGHT_SHIFT_CTYPE(unsigned short, false) - OP_RIGHT_SHIFT_CTYPE(int, true) - OP_RIGHT_SHIFT_CTYPE(unsigned int, false) - OP_RIGHT_SHIFT_CTYPE(long, true) - OP_RIGHT_SHIFT_CTYPE(unsigned long, false) - OP_RIGHT_SHIFT_CTYPE(unsigned long long, false) - OP_RIGHT_SHIFT_CTYPE(long long, true) -#if 0 - OP_RIGHT_SHIFT_CTYPE(half, false) - OP_RIGHT_SHIFT_CTYPE(float, false) - OP_RIGHT_SHIFT_CTYPE(double, false) -#endif - -#undef OP_RIGHT_SHIFT_CTYPE - - template - INLINE ap_private operator>>(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this >> sh; - } else { - int sh = op2.to_int(); - return *this >> sh; - } - } - - /// Shift assign - //----------------------------------------------------------------- - - //INLINE const ap_private& operator<<=(uint32_t shiftAmt) { - // VAL <<= shiftAmt; - // clearUnusedBits(); - // return *this; - //} - -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym##=(int op) { \ - *this = operator Sym(op); \ - clearUnusedBits(); \ - return *this; \ - } \ - INLINE ap_private& operator Sym##=(unsigned int op) { \ - *this = operator Sym(op); \ - clearUnusedBits(); \ - return *this; \ - } \ - template \ - INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - clearUnusedBits(); \ - return *this; \ - } - - OP_ASSIGN_AP(>>) - OP_ASSIGN_AP(<<) -#undef OP_ASSIGN_AP - - /// Comparisons - //----------------------------------------------------------------- - template - INLINE bool operator==(const ap_private<_AP_W1, _AP_S1>& op) const { - enum { _AP_MAX_W = 
AP_MAX(AP_MAX(_AP_W, _AP_W1), 32) }; - ap_private<_AP_MAX_W, false> lhs(*this); - ap_private<_AP_MAX_W, false> rhs(op); - if (_AP_MAX_W <= 64) { - return (uint64_t)lhs.get_VAL() == (uint64_t)rhs.get_VAL(); - } else - return lhs == rhs; - } - - template - INLINE bool operator!=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this == op); - } - - template - INLINE bool operator>(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - // this will follow gcc rule for comparison - // between different bitwidth and signness - if (_AP_S == _AP_S2) - return _AP_S ? lhs.sgt(rhs) : lhs.ugt(rhs); - else if (_AP_W < 32 && _AP_W2 < 32) - // different signness but both bitwidth is less than 32 - return lhs.sgt(rhs); - else - // different signness but bigger bitwidth - // is greater or equal to 32 - if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - } - - template - INLINE bool operator<=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this > op); - } - - template - INLINE bool operator<(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - if (_AP_S == _AP_S2) - return _AP_S ? 
lhs.slt(rhs) : lhs.ult(rhs); - else if (_AP_W < 32 && _AP_W2 < 32) - return lhs.slt(rhs); - else if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - } - - template - INLINE bool operator>=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this < op); - } - - /// Bit and Part Select - //-------------------------------------------------------------- - // FIXME now _private_range_ref refs to _AP_ROOT_TYPE(struct ssdm_int). - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - const_cast*>(this), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - (const_cast*>(this)), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> operator[](int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[](int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> bit(int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> bit(const ap_private<_AP_W2, _AP_S2>& 
index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> bit(int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> bit( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(const ap_private<_AP_W2, _AP_S2>& a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(ap_private<_AP_W2, _AP_S2>& a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// 
operator,(ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(_private_range_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(_private_bit_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, 
-// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, -// a2); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, -// a2); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast&>( -// a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,( -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); -// } -// -// template -// INLINE ap_private operator&( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// 
return *this & a2.get(); -// } -// -// template -// INLINE ap_private operator|( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this | a2.get(); -// } -// -// template -// INLINE ap_private operator^( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this ^ a2.get(); -// } - - // Reduce operation - //----------------------------------------------------------- - INLINE bool and_reduce() const { return (VAL & mask) == mask; } - - INLINE bool nand_reduce() const { return (VAL & mask) != mask; } - - INLINE bool or_reduce() const { return (bool)VAL; } - - INLINE bool nor_reduce() const { return VAL == 0; } - - INLINE bool xor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? true : false; - } - - INLINE bool xnor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? false : true; - } - - INLINE std::string to_string(uint8_t radix = 2, bool sign = false) const { - return toString(radix, radix == 10 ? _AP_S : sign); - } -}; // End of class ap_private <_AP_W, _AP_S, true> - -template -std::string ap_private<_AP_W, _AP_S, true>::toString(uint8_t radix, - bool wantSigned) const { - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - static const char* digits[] = {"0", "1", "2", "3", "4", "5", "6", "7", - "8", "9", "a", "b", "c", "d", "e", "f"}; - std::string result; - if (radix != 10) { - // For the 2, 8 and 16 bit cases, we can just shift instead of divide - // because the number of bits per digit (1,3 and 4 respectively) divides - // equaly. We just shift until there value is zero. - - // First, check for a zero value and just short circuit the logic below. - if (*this == (uint64_t)(0)) { - // Always generate a radix indicator because fixed-point - // formats require it. 
- switch (radix) { - case 2: - result = "0b0"; - break; - case 8: - result = "0o0"; - break; - case 16: - result = "0x0"; - break; - default: - assert("invalid radix" && 0); - } - } else { - ap_private<_AP_W, false, true> tmp(*this); - size_t insert_at = 0; - bool leading_zero = true; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. - tmp.flip(); - tmp++; - result = "-"; - insert_at = 1; - leading_zero = false; - } - switch (radix) { - case 2: - result += "0b"; - break; - case 8: - result += "0o"; - break; - case 16: - result += "0x"; - break; - default: - assert("invalid radix" && 0); - } - insert_at += 2; - - // Just shift tmp right for each digit width until it becomes zero - uint32_t shift = (radix == 16 ? 4 : (radix == 8 ? 3 : 1)); - uint64_t mask = radix - 1; - ap_private<_AP_W, false, true> zero(0); - unsigned bits = 0; - bool msb = false; - while (tmp.ne(zero)) { - unsigned digit = (unsigned)(tmp.get_VAL() & mask); - result.insert(insert_at, digits[digit]); - tmp = tmp.lshr(shift); - bits++; - msb = (digit >> (shift - 1)) == 1; - } - bits *= shift; - if (bits < _AP_W && leading_zero && msb) - result.insert(insert_at, digits[0]); - } - return result; - } - - ap_private<_AP_W, false, true> tmp(*this); - ap_private<6, false, true> divisor(radix); - ap_private<_AP_W, _AP_S, true> zero(0); - size_t insert_at = 0; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. 
- tmp.flip(); - tmp++; - result = "-"; - insert_at = 1; - } - if (tmp == ap_private<_AP_W, false, true>(0ULL)) - result = "0"; - else - while (tmp.ne(zero)) { - ap_private<_AP_W, false, true> APdigit = tmp % divisor; - ap_private<_AP_W, false, true> tmp2 = tmp / divisor; - uint32_t digit = (uint32_t)(APdigit.getZExtValue()); - assert(digit < radix && "divide failed"); - result.insert(insert_at, digits[digit]); - tmp = tmp2; - } - return result; - -} // End of ap_private<_AP_W, _AP_S, true>::toString() - -// bitwidth > 64 -template -class ap_private<_AP_W, _AP_S, false> { - // SFINAE pattern. Only consider this class when _AP_W > 64 - const static bool valid = ap_private_enable_if<(_AP_W > 64)>::isValid; - -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - public: - enum { BitWidth = _AP_W, _AP_N = (_AP_W + 63) / 64 }; - static const int width = _AP_W; - - private: - /// This constructor is used only internally for speed of construction of - /// temporaries. It is unsafe for general use so it is not public. - - /* Constructors */ - /// Note that numWords can be smaller or larger than the corresponding bit - /// width but any extraneous bits will be dropped. - /// @param numWords the number of words in bigVal - /// @param bigVal a sequence of words to form the initial value of the - /// ap_private - /// @brief Construct an ap_private, initialized as bigVal[]. - INLINE ap_private(uint32_t numWords, const uint64_t bigVal[]) { - set_canary(); - assert(bigVal && "Null pointer detected!"); - { - // Get memory, cleared to 0 - memset(pVal, 0, _AP_N * sizeof(uint64_t)); - - // Calculate the number of words to copy - uint32_t words = AESL_std::min(numWords, _AP_N); - // Copy the words from bigVal to pVal - memcpy(pVal, bigVal, words * APINT_WORD_SIZE); - if (words >= _AP_W) clearUnusedBits(); - // Make sure unused high bits are cleared - } - check_canary(); - } - - /// This constructor interprets Val as a string in the given radix. 
The - /// interpretation stops when the first charater that is not suitable for the - /// radix is encountered. Acceptable radix values are 2, 8, 10 and 16. It is - /// an error for the value implied by the string to require more bits than - /// numBits. - /// @param val the string to be interpreted - /// @param radix the radix of Val to use for the intepretation - /// @brief Construct an ap_private from a string representation. - INLINE ap_private(const std::string& val, uint8_t radix = 2) { - set_canary(); - assert(!val.empty() && "The input string is empty."); - const char* c_str = val.c_str(); - fromString(c_str, val.size(), radix); - check_canary(); - } - - /// This constructor interprets the slen characters starting at StrStart as - /// a string in the given radix. The interpretation stops when the first - /// character that is not suitable for the radix is encountered. Acceptable - /// radix values are 2, 8, 10 and 16. It is an error for the value implied by - /// the string to require more bits than numBits. - /// @param strStart the start of the string to be interpreted - /// @param slen the maximum number of characters to interpret - /// @param radix the radix to use for the conversion - /// @brief Construct an ap_private from a string representation. - /// This method does not consider whether it is negative or not. - INLINE ap_private(const char strStart[], uint32_t slen, uint8_t radix) { - set_canary(); - fromString(strStart, slen, radix); - check_canary(); - } - - INLINE void report() { - _AP_ERROR(_AP_W > MAX_MODE(AP_INT_MAX_W) * 1024, - "ap_%sint<%d>: Bitwidth exceeds the " - "default max value %d. Please use macro " - "AP_INT_MAX_W to set a larger max value.", - _AP_S ? "" : "u", _AP_W, MAX_MODE(AP_INT_MAX_W) * 1024); - } - /// This union is used to store the integer value. When the - /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. - - /// This enum is used to hold the constants we needed for ap_private. 
- // uint64_t VAL; ///< Used to store the <= 64 bits integer value. - uint64_t pVal[_AP_N]; ///< Used to store the >64 bits integer value. -#ifdef AP_CANARY - uint64_t CANARY; - INLINE void check_canary() { assert(CANARY == (uint64_t)0xDEADBEEFDEADBEEF); } - INLINE void set_canary() { CANARY = (uint64_t)0xDEADBEEFDEADBEEF; } -#else - INLINE void check_canary() {} - INLINE void set_canary() {} -#endif - - public: - typedef typename valtype<8, _AP_S>::Type ValType; - typedef ap_private<_AP_W, _AP_S> Type; - // FIXME remove friend type? - template - friend struct ap_fixed_base; - /// return type of variety of operations - //---------------------------------------------------------- - template - struct RType { - enum { - mult_w = _AP_W + _AP_W2, - mult_s = _AP_S || _AP_S2, - plus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, - div_w = _AP_W + _AP_S2, - div_s = _AP_S || _AP_S2, - mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), - mod_s = _AP_S, - logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - typedef ap_private mult; - typedef ap_private plus; - typedef ap_private minus; - typedef ap_private logic; - typedef ap_private div; - typedef ap_private mod; - typedef ap_private<_AP_W, _AP_S> arg1; - typedef bool reduce; - }; - - INLINE uint64_t& get_VAL(void) { return pVal[0]; } - INLINE uint64_t get_VAL(void) const { return pVal[0]; } - INLINE uint64_t get_VAL(void) const volatile { return pVal[0]; } - INLINE void set_VAL(uint64_t value) { pVal[0] = value; } - INLINE uint64_t& get_pVal(int index) { return pVal[index]; } - INLINE uint64_t* get_pVal() { return pVal; } - INLINE const uint64_t* get_pVal() const { return pVal; } - INLINE uint64_t get_pVal(int index) const { return pVal[index]; } - INLINE uint64_t* get_pVal() const volatile { return pVal; } - 
INLINE uint64_t get_pVal(int index) const volatile { return pVal[index]; } - INLINE void set_pVal(int i, uint64_t value) { pVal[i] = value; } - - /// This enum is used to hold the constants we needed for ap_private. - enum { - APINT_BITS_PER_WORD = sizeof(uint64_t) * 8, ///< Bits in a word - APINT_WORD_SIZE = sizeof(uint64_t) ///< Byte size of a word - }; - - enum { - excess_bits = (_AP_W % APINT_BITS_PER_WORD) - ? APINT_BITS_PER_WORD - (_AP_W % APINT_BITS_PER_WORD) - : 0 - }; - static const uint64_t mask = ((uint64_t)~0ULL >> (excess_bits)); - - public: - // NOTE changed to explicit to be consistent with ap_private - explicit INLINE ap_private(const char* val) { - set_canary(); - unsigned char radix = 10; - std::string str = ap_private_ops::parseString(val, radix); // determine radix. - std::string::size_type pos = str.find('.'); - if (pos != std::string::npos) str = str.substr(pos); - ap_private ap_private_val(str, radix); - operator=(ap_private_val); - report(); - check_canary(); - } - - INLINE ap_private(const char* val, unsigned char rd) { - set_canary(); - unsigned char radix = rd; - std::string str = ap_private_ops::parseString(val, radix); // determine radix. 
- std::string::size_type pos = str.find('.'); - if (pos != std::string::npos) str = str.substr(pos); - ap_private ap_private_val(str, radix); - operator=(ap_private_val); - report(); - - report(); - check_canary(); - } - - template - INLINE ap_private(const _private_range_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ref.get(); - report(); - check_canary(); - } - - template - INLINE ap_private(const _private_bit_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ((uint64_t)(bool)ref); - report(); - check_canary(); - } - -// template -// INLINE ap_private(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { -// set_canary(); -// *this = ref.get(); -// report(); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = ((val.operator ap_private<_AP_W2, false>())); -// report(); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = (uint64_t)(bool)val; -// report(); -// check_canary(); -// } - - /// Simply makes *this a copy of that. - /// @brief Copy Constructor. - INLINE ap_private(const ap_private& that) { - set_canary(); - memcpy(pVal, that.get_pVal(), _AP_N * APINT_WORD_SIZE); - clearUnusedBits(); - check_canary(); - } - - template - INLINE ap_private(const ap_private<_AP_W1, _AP_S1, false>& that) { - set_canary(); - operator=(that); - check_canary(); - } - - template - INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, false>& that) { - set_canary(); - operator=(const_cast&>(that)); - check_canary(); - } - - template - INLINE ap_private(const ap_private<_AP_W1, _AP_S1, true>& that) { - set_canary(); - static const uint64_t that_sign_ext_mask = - (_AP_W1 == APINT_BITS_PER_WORD) - ? 
0 - : ~0ULL >> (_AP_W1 % APINT_BITS_PER_WORD) - << (_AP_W1 % APINT_BITS_PER_WORD); - if (that.isNegative()) { - pVal[0] = that.get_VAL() | that_sign_ext_mask; - memset(pVal + 1, ~0, sizeof(uint64_t) * (_AP_N - 1)); - } else { - pVal[0] = that.get_VAL(); - memset(pVal + 1, 0, sizeof(uint64_t) * (_AP_N - 1)); - } - clearUnusedBits(); - check_canary(); - } - - template - INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, true>& that) { - set_canary(); - operator=(const_cast&>(that)); - check_canary(); - } - - /// @brief Destructor. - // virtual ~ap_private() {} - INLINE ~ap_private() { check_canary(); } - - /// @name Constructors - /// @{ - - /// Default constructor that creates an uninitialized ap_private. This is - /// useful - /// for object deserialization (pair this with the static method Read). - INLINE ap_private() { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - INLINE ap_private(uint64_t* val, uint32_t bits = _AP_W) { assert(0); } - INLINE ap_private(const uint64_t* const val, uint32_t bits) { assert(0); } - -/// If isSigned is true then val is treated as if it were a signed value -/// (i.e. as an int64_t) and the appropriate sign extension to the bit width -/// will be done. Otherwise, no sign extension occurs (high order bits beyond -/// the range of val are zero filled). -/// @param numBits the bit width of the constructed ap_private -/// @param val the initial value of the ap_private -/// @param isSigned how to treat signedness of val -/// @brief Create a new ap_private of numBits width, initialized as val. 
-#define CTOR(TYPE, SIGNED) \ - INLINE ap_private(TYPE val, bool isSigned = SIGNED) { \ - set_canary(); \ - pVal[0] = (ValType)val; \ - if (isSigned && int64_t(pVal[0]) < 0) { \ - memset(pVal + 1, ~0, sizeof(uint64_t) * (_AP_N - 1)); \ - } else { \ - memset(pVal + 1, 0, sizeof(uint64_t) * (_AP_N - 1)); \ - } \ - clearUnusedBits(); \ - check_canary(); \ - } - - CTOR(bool, false) - CTOR(char, CHAR_IS_SIGNED) - CTOR(signed char, true) - CTOR(unsigned char, false) - CTOR(short, true) - CTOR(unsigned short, false) - CTOR(int, true) - CTOR(unsigned int, false) - CTOR(long, true) - CTOR(unsigned long, false) - CTOR(ap_slong, true) - CTOR(ap_ulong, false) -#if 0 - CTOR(half, false) - CTOR(float, false) - CTOR(double, false) -#endif -#undef CTOR - - /// @returns true if the number of bits <= 64, false otherwise. - /// @brief Determine if this ap_private just has one word to store value. - INLINE bool isSingleWord() const { return false; } - - /// @returns the word position for the specified bit position. - /// @brief Determine which word a bit is in. - static INLINE uint32_t whichWord(uint32_t bitPosition) { - // return bitPosition / APINT_BITS_PER_WORD; - return (bitPosition) >> 6; - } - - /// @returns the bit position in a word for the specified bit position - /// in the ap_private. - /// @brief Determine which bit in a word a bit is in. - static INLINE uint32_t whichBit(uint32_t bitPosition) { - // return bitPosition % APINT_BITS_PER_WORD; - return bitPosition & 0x3f; - } - - /// bit at a specific bit position. This is used to mask the bit in the - /// corresponding word. - /// @returns a uint64_t with only bit at "whichBit(bitPosition)" set - /// @brief Get a single bit mask. - static INLINE uint64_t maskBit(uint32_t bitPosition) { - return 1ULL << (whichBit(bitPosition)); - } - - /// @returns the corresponding word for the specified bit position. 
- /// @brief Get the word corresponding to a bit position - INLINE uint64_t getWord(uint32_t bitPosition) const { - return pVal[whichWord(bitPosition)]; - } - - /// This method is used internally to clear the to "N" bits in the high order - /// word that are not used by the ap_private. This is needed after the most - /// significant word is assigned a value to ensure that those bits are - /// zero'd out. - /// @brief Clear unused high order bits - INLINE void clearUnusedBits(void) volatile -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - pVal[_AP_N - 1] = - _AP_S ? ((((int64_t)pVal[_AP_N - 1]) << (excess_bits)) >> excess_bits) - : (excess_bits - ? ((pVal[_AP_N - 1]) << (excess_bits)) >> (excess_bits) - : pVal[_AP_N - 1]); - } - - INLINE void clearUnusedBitsToZero(void) { pVal[_AP_N - 1] &= mask; } - - INLINE void clearUnusedBitsToOne(void) { pVal[_AP_N - 1] |= mask; } - - /// This is used by the constructors that take string arguments. 
- /// @brief Convert a char array into an ap_private - INLINE void fromString(const char* str, uint32_t slen, uint8_t radix) { - enum { numbits = _AP_W }; - bool isNeg = str[0] == '-'; - if (isNeg) { - str++; - slen--; - } - - if (str[0] == '0' && (str[1] == 'b' || str[1] == 'B')) { - //if(radix == 0) radix = 2; - _AP_WARNING(radix != 2, "%s seems to have base %d, but %d given.", str, 2, radix); - str += 2; - slen -=2; - } else if (str[0] == '0' && (str[1] == 'o' || str[1] == 'O')) { - //if (radix == 0) radix = 8; - _AP_WARNING(radix != 8, "%s seems to have base %d, but %d given.", str, 8, radix); - str += 2; - slen -=2; - } else if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) { - //if (radix == 0) radix = 16; - _AP_WARNING(radix != 16, "%s seems to have base %d, but %d given.", str, 16, radix); - str += 2; - slen -=2; - } else if (str[0] == '0' && (str[1] == 'd' || str[1] == 'D')) { - //if (radix == 0) radix = 10; - _AP_WARNING(radix != 10, "%s seems to have base %d, but %d given.", str, 10, radix); - str += 2; - slen -=2; - } else if (radix == 0) { - //radix = 2; // XXX default value - } - - // Check our assumptions here - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - assert(str && "String is null?"); - - // skip any leading zero - while (*str == '0' && *(str + 1) != '\0') { - str++; - slen--; - } - assert((slen <= numbits || radix != 2) && "Insufficient bit width"); - assert(((slen - 1) * 3 <= numbits || radix != 8) && - "Insufficient bit width"); - assert(((slen - 1) * 4 <= numbits || radix != 16) && - "Insufficient bit width"); - assert((((slen - 1) * 64) / 22 <= numbits || radix != 10) && - "Insufficient bit width"); - - // clear bits - memset(pVal, 0, _AP_N * sizeof(uint64_t)); - - // Figure out if we can shift instead of multiply - uint32_t shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 
1 : 0); - - // Set up an ap_private for the digit to add outside the loop so we don't - // constantly construct/destruct it. - uint64_t bigVal[_AP_N]; - memset(bigVal, 0, _AP_N * sizeof(uint64_t)); - ap_private<_AP_W, _AP_S> apdigit(getBitWidth(), bigVal); - ap_private<_AP_W, _AP_S> apradix(radix); - - // Enter digit traversal loop - for (unsigned i = 0; i < slen; i++) { - // Get a digit - uint32_t digit = 0; - char cdigit = str[i]; - if (radix == 16) { -#define isxdigit(c) \ - (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || \ - ((c) >= 'A' && (c) <= 'F')) -#define isdigit(c) ((c) >= '0' && (c) <= '9') - if (!isxdigit(cdigit)) assert(0 && "Invalid hex digit in string"); - if (isdigit(cdigit)) - digit = cdigit - '0'; - else if (cdigit >= 'a') - digit = cdigit - 'a' + 10; - else if (cdigit >= 'A') - digit = cdigit - 'A' + 10; - else - assert(0 && "huh? we shouldn't get here"); - } else if (isdigit(cdigit)) { - digit = cdigit - '0'; - } else if (cdigit != '\0') { - assert(0 && "Invalid character in digit string"); - } -#undef isxdigit -#undef isdigit - // Shift or multiply the value by the radix - if (shift) - *this <<= shift; - else - *this *= apradix; - - // Add in the digit we just interpreted - apdigit.set_VAL(digit); - *this += apdigit; - } - // If its negative, put it in two's complement form - if (isNeg) { - (*this)--; - this->flip(); - } - clearUnusedBits(); - } - - INLINE ap_private read() volatile { return *this; } - - INLINE void write(const ap_private& op2) volatile { *this = (op2); } - - INLINE operator ValType() const { return get_VAL(); } - - INLINE int to_uchar() const { return (unsigned char)get_VAL(); } - - INLINE int to_char() const { return (signed char)get_VAL(); } - - INLINE int to_ushort() const { return (unsigned short)get_VAL(); } - - INLINE int to_short() const { return (short)get_VAL(); } - - INLINE int to_int() const { return (int)get_VAL(); } - - INLINE unsigned to_uint() const { return (unsigned)get_VAL(); } - - INLINE long 
to_long() const { return (long)get_VAL(); } - - INLINE unsigned long to_ulong() const { return (unsigned long)get_VAL(); } - - INLINE ap_slong to_int64() const { return (ap_slong)get_VAL(); } - - INLINE ap_ulong to_uint64() const { return (ap_ulong)get_VAL(); } - - INLINE double to_double() const { - if (isNegative()) - return roundToDouble(true); - else - return roundToDouble(false); - } - - INLINE unsigned length() const { return _AP_W; } - - /*Reverse the contents of ap_private instance. I.e. LSB becomes MSB and vise - * versa*/ - INLINE ap_private& reverse() { - for (int i = 0; i < _AP_W / 2; ++i) { - bool tmp = operator[](i); - if (operator[](_AP_W - 1 - i)) - set(i); - else - clear(i); - if (tmp) - set(_AP_W - 1 - i); - else - clear(_AP_W - 1 - i); - } - clearUnusedBits(); - return *this; - } - - /*Return true if the value of ap_private instance is zero*/ - INLINE bool iszero() const { return isMinValue(); } - - INLINE bool to_bool() const { return !iszero(); } - - /* x < 0 */ - INLINE bool sign() const { - if (isNegative()) return true; - return false; - } - - /* x[i] = !x[i] */ - INLINE void invert(int i) { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - flip(i); - } - - /* x[i] */ - INLINE bool test(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return operator[](i); - } - - // Set the ith bit into v - INLINE void set(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // Set the ith bit into v - INLINE void set_bit(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // FIXME different argument for different action? 
- INLINE ap_private& set(uint32_t bitPosition) { - pVal[whichWord(bitPosition)] |= maskBit(bitPosition); - clearUnusedBits(); - return *this; - } - - INLINE void set() { - for (int i = 0; i < _AP_N; ++i) pVal[i] = ~0ULL; - clearUnusedBits(); - } - - // Get the value of ith bit - INLINE bool get(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return ((maskBit(i) & (pVal[whichWord(i)])) != 0); - } - - // Get the value of ith bit - INLINE bool get_bit(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return ((maskBit(i) & (pVal[whichWord(i)])) != 0); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the left - INLINE void lrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(shl(n) | lshr(_AP_W - n)); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the right - INLINE void rrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(lshr(n) | shl(_AP_W - n)); - } - - /// Set the given bit to 0 whose position is given as "bitPosition". - /// @brief Set a given bit to 0. - INLINE ap_private& clear(uint32_t bitPosition) { - pVal[whichWord(bitPosition)] &= ~maskBit(bitPosition); - clearUnusedBits(); - return *this; - } - - /// @brief Set every bit to 0. - INLINE void clear() { memset(pVal, 0, _AP_N * APINT_WORD_SIZE); } - - /// @brief Toggle every bit to its opposite value. - ap_private& flip() { - for (int i = 0; i < _AP_N; ++i) pVal[i] ^= ~0ULL; - clearUnusedBits(); - return *this; - } - - /// @brief Toggles a given bit to its opposite value. 
- INLINE ap_private& flip(uint32_t bitPosition) { - assert(bitPosition < BitWidth && "Out of the bit-width range!"); - set_bit(bitPosition, !get_bit(bitPosition)); - return *this; - } - - // complements every bit - INLINE void b_not() { flip(); } - - INLINE ap_private getLoBits(uint32_t numBits) const { - return ap_private_ops::lshr(ap_private_ops::shl(*this, _AP_W - numBits), - _AP_W - numBits); - } - - INLINE ap_private getHiBits(uint32_t numBits) const { - return ap_private_ops::lshr(*this, _AP_W - numBits); - } - - // Binary Arithmetic - //----------------------------------------------------------- - -// template -// INLINE ap_private operator&( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this & a2.get(); -// } -// -// template -// INLINE ap_private operator|( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this | a2.get(); -// } -// -// template -// INLINE ap_private operator^( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this ^ a2.get(); -// } - -/// Arithmetic assign -//------------------------------------------------------------- - -#define OP_BIN_LOGIC_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym(const ap_private<_AP_W1, _AP_S1>& RHS) { \ - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; \ - uint32_t numWords = AESL_std::min((int)_AP_N, _AP_N1); \ - uint32_t i; \ - if (_AP_W != _AP_W1) \ - fprintf(stderr, \ - "Warning! Bitsize mismach for ap_[u]int " #Sym " ap_[u]int.\n"); \ - for (i = 0; i < numWords; ++i) pVal[i] Sym RHS.get_pVal(i); \ - if (_AP_N1 < _AP_N) { \ - uint64_t ext = RHS.isNegative() ? ~0ULL : 0; \ - for (; i < _AP_N; i++) pVal[i] Sym ext; \ - } \ - clearUnusedBits(); \ - return *this; \ - } - - OP_BIN_LOGIC_ASSIGN_AP(&=); - OP_BIN_LOGIC_ASSIGN_AP(|=); - OP_BIN_LOGIC_ASSIGN_AP(^=); -#undef OP_BIN_LOGIC_ASSIGN_AP - - /// Adds the RHS APint to this ap_private. - /// @returns this, after addition of RHS. 
- /// @brief Addition assignment operator. - template - INLINE ap_private& operator+=(const ap_private<_AP_W1, _AP_S1>& RHS) { - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; - uint64_t RHSpVal[_AP_N1]; - for (int i = 0; i < _AP_N1; ++i) RHSpVal[i] = RHS.get_pVal(i); - ap_private_ops::add(pVal, pVal, RHSpVal, _AP_N, _AP_N, _AP_N1, _AP_S, - _AP_S1); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator-=(const ap_private<_AP_W1, _AP_S1>& RHS) { - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; - uint64_t RHSpVal[_AP_N1]; - for (int i = 0; i < _AP_N1; ++i) RHSpVal[i] = RHS.get_pVal(i); - ap_private_ops::sub(pVal, pVal, RHSpVal, _AP_N, _AP_N, _AP_N1, _AP_S, - _AP_S1); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator*=(const ap_private<_AP_W1, _AP_S1>& RHS) { - // Get some bit facts about LHS and check for zero - uint32_t lhsBits = getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : whichWord(lhsBits - 1) + 1; - if (!lhsWords) { - // 0 * X ===> 0 - return *this; - } - - ap_private dupRHS = RHS; - // Get some bit facts about RHS and check for zero - uint32_t rhsBits = dupRHS.getActiveBits(); - uint32_t rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1; - if (!rhsWords) { - // X * 0 ===> 0 - clear(); - return *this; - } - - // Allocate space for the result - uint32_t destWords = rhsWords + lhsWords; - uint64_t* dest = (uint64_t*)malloc(destWords * sizeof(uint64_t)); - - // Perform the long multiply - ap_private_ops::mul(dest, pVal, lhsWords, dupRHS.get_pVal(), rhsWords, - destWords); - - // Copy result back into *this - clear(); - uint32_t wordsToCopy = destWords >= _AP_N ? _AP_N : destWords; - - memcpy(pVal, dest, wordsToCopy * APINT_WORD_SIZE); - - uint64_t ext = (isNegative() ^ RHS.isNegative()) ? 
~0ULL : 0ULL; - for (int i = wordsToCopy; i < _AP_N; i++) pVal[i] = ext; - clearUnusedBits(); - // delete dest array and return - free(dest); - return *this; - } - -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - return *this; \ - } - - OP_ASSIGN_AP(/) - OP_ASSIGN_AP(%) -#undef OP_ASSIGN_AP - -#define OP_BIN_LOGIC_AP(Sym) \ - template \ - INLINE typename RType<_AP_W1, _AP_S1>::logic operator Sym( \ - const ap_private<_AP_W1, _AP_S1>& RHS) const { \ - enum { \ - numWords = (RType<_AP_W1, _AP_S1>::logic_w + APINT_BITS_PER_WORD - 1) / \ - APINT_BITS_PER_WORD \ - }; \ - typename RType<_AP_W1, _AP_S1>::logic Result; \ - uint32_t i; \ - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; \ - uint32_t min_N = std::min((int)_AP_N, _AP_N1); \ - uint32_t max_N = std::max((int)_AP_N, _AP_N1); \ - for (i = 0; i < min_N; ++i) \ - Result.set_pVal(i, pVal[i] Sym RHS.get_pVal(i)); \ - if (numWords > i) { \ - uint64_t ext = ((_AP_N < _AP_N1 && isNegative()) || \ - (_AP_N1 < _AP_N && RHS.isNegative())) \ - ? ~0ULL \ - : 0; \ - if (_AP_N > _AP_N1) \ - for (; i < max_N; i++) Result.set_pVal(i, pVal[i] Sym ext); \ - else \ - for (; i < max_N; i++) Result.set_pVal(i, RHS.get_pVal(i) Sym ext); \ - if (numWords > i) { \ - uint64_t ext2 = ((_AP_N > _AP_N1 && isNegative()) || \ - (_AP_N1 > _AP_N && RHS.isNegative())) \ - ? 
~0ULL \ - : 0; \ - Result.set_pVal(i, ext Sym ext2); \ - } \ - } \ - Result.clearUnusedBits(); \ - return Result; \ - } - - OP_BIN_LOGIC_AP(|); - OP_BIN_LOGIC_AP(&); - OP_BIN_LOGIC_AP(^); - -#undef OP_BIN_LOGIC_AP - - template - INLINE typename RType<_AP_W1, _AP_S1>::plus operator+( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - typename RType<_AP_W1, _AP_S1>::plus Result, lhs(*this), rhs(RHS); - const int Result_AP_N = (RType<_AP_W1, _AP_S1>::plus_w + 63) / 64; - ap_private_ops::add(Result.get_pVal(), lhs.get_pVal(), rhs.get_pVal(), - Result_AP_N, Result_AP_N, Result_AP_N, _AP_S, _AP_S1); - Result.clearUnusedBits(); - return Result; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::minus operator-( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - typename RType<_AP_W1, _AP_S1>::minus Result, lhs(*this), rhs(RHS); - const int Result_AP_N = (RType<_AP_W1, _AP_S1>::minus_w + 63) / 64; - ap_private_ops::sub(Result.get_pVal(), lhs.get_pVal(), rhs.get_pVal(), - Result_AP_N, Result_AP_N, Result_AP_N, _AP_S, _AP_S1); - Result.clearUnusedBits(); - return Result; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::mult operator*( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - typename RType<_AP_W1, _AP_S1>::mult temp = *this; - temp *= RHS; - return temp; - } - - template - INLINE typename RType<_AP_W2, _AP_S2>::div operator/( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - return typename RType<_AP_W2, _AP_S2>::div( - (_AP_S || _AP_S2) ? lhs.sdiv(rhs) : lhs.udiv(rhs)); - } - - template - INLINE typename RType<_AP_W2, _AP_S2>::mod operator%( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? 
_AP_S2 : _AP_S || _AP_S2))> - rhs = op; - typename RType<_AP_W2, _AP_S2>::mod res = - typename RType<_AP_W2, _AP_S2>::mod(_AP_S ? lhs.srem(rhs) - : lhs.urem(rhs)); - return res; - } - -#define OP_LEFT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator<<(const TYPE op) const { \ - if (op >= _AP_W) return ap_private(0); \ - if (SIGNED && op < 0) return *this >> (0 - op); \ - return shl(op); \ - } - - OP_LEFT_SHIFT_CTYPE(int, true) - // OP_LEFT_SHIFT_CTYPE(bool, false) - OP_LEFT_SHIFT_CTYPE(signed char, true) - OP_LEFT_SHIFT_CTYPE(unsigned char, false) - OP_LEFT_SHIFT_CTYPE(short, true) - OP_LEFT_SHIFT_CTYPE(unsigned short, false) - OP_LEFT_SHIFT_CTYPE(unsigned int, false) - OP_LEFT_SHIFT_CTYPE(long, true) - OP_LEFT_SHIFT_CTYPE(unsigned long, false) - OP_LEFT_SHIFT_CTYPE(unsigned long long, false) - OP_LEFT_SHIFT_CTYPE(long long, true) -#if 0 - OP_LEFT_SHIFT_CTYPE(half, false) - OP_LEFT_SHIFT_CTYPE(float, false) - OP_LEFT_SHIFT_CTYPE(double, false) -#endif -#undef OP_LEFT_SHIFT_CTYPE - - template - INLINE ap_private operator<<(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this << sh; - } else { - int sh = op2.to_int(); - return *this << sh; - } - } - -#define OP_RIGHT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator>>(const TYPE op) const { \ - if (op >= _AP_W) { \ - if (isNegative()) \ - return ap_private(-1); \ - else \ - return ap_private(0); \ - } \ - if ((SIGNED) && op < 0) return *this << (0 - op); \ - if (_AP_S) \ - return ashr(op); \ - else \ - return lshr(op); \ - } - - // OP_RIGHT_SHIFT_CTYPE(bool, false) - OP_RIGHT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) - OP_RIGHT_SHIFT_CTYPE(signed char, true) - OP_RIGHT_SHIFT_CTYPE(unsigned char, false) - OP_RIGHT_SHIFT_CTYPE(short, true) - OP_RIGHT_SHIFT_CTYPE(unsigned short, false) - OP_RIGHT_SHIFT_CTYPE(int, true) - OP_RIGHT_SHIFT_CTYPE(unsigned int, false) - OP_RIGHT_SHIFT_CTYPE(long, true) - OP_RIGHT_SHIFT_CTYPE(unsigned long, false) - 
OP_RIGHT_SHIFT_CTYPE(unsigned long long, false) - OP_RIGHT_SHIFT_CTYPE(long long, true) -#if 0 - OP_RIGHT_SHIFT_CTYPE(half, false) - OP_RIGHT_SHIFT_CTYPE(float, false) - OP_RIGHT_SHIFT_CTYPE(double, false) -#endif -#undef OP_RIGHT_SHIFT_CTYPE - - template - INLINE ap_private operator>>(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this >> sh; - } else { - int sh = op2.to_int(); - return *this >> sh; - } - } - - /// Shift assign - //------------------------------------------------------------------ - // TODO call clearUnusedBits ? -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym##=(int op) { \ - *this = operator Sym(op); \ - return *this; \ - } \ - INLINE ap_private& operator Sym##=(unsigned int op) { \ - *this = operator Sym(op); \ - return *this; \ - } \ - template \ - INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - return *this; \ - } - OP_ASSIGN_AP(>>) - OP_ASSIGN_AP(<<) -#undef OP_ASSIGN_AP - - /// Comparisons - //----------------------------------------------------------------- - INLINE bool operator==(const ap_private& RHS) const { - // Get some facts about the number of bits used in the two operands. - uint32_t n1 = getActiveBits(); - uint32_t n2 = RHS.getActiveBits(); - - // If the number of bits isn't the same, they aren't equal - if (n1 != n2) return false; - - // If the number of bits fits in a word, we only need to compare the low - // word. 
- if (n1 <= APINT_BITS_PER_WORD) return pVal[0] == RHS.get_pVal(0); - - // Otherwise, compare everything - for (int i = whichWord(n1 - 1); i >= 0; --i) - if (pVal[i] != RHS.get_pVal(i)) return false; - return true; - } - - template - INLINE bool operator==(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W, _AP_W2), - }; - ap_private<_AP_MAX_W, false> lhs(*this); - ap_private<_AP_MAX_W, false> rhs(op); - return lhs == rhs; - } - - INLINE bool operator==(uint64_t Val) const { - uint32_t n = getActiveBits(); - if (n <= APINT_BITS_PER_WORD) - return pVal[0] == Val; - else - return false; - } - - template - INLINE bool operator!=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this == op); - } - - template - INLINE bool operator!=(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !((*this) == RHS); - } - - INLINE bool operator!=(uint64_t Val) const { return !((*this) == Val); } - - template - INLINE bool operator<=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this > op); - } - - INLINE bool operator<(const ap_private& op) const { - return _AP_S ? slt(op) : ult(op); - } - - template - INLINE bool operator<(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - if (_AP_S == _AP_S2) - return _AP_S ? lhs.slt(rhs) : lhs.ult(rhs); - else if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - } - - template - INLINE bool operator>=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this < op); - } - - INLINE bool operator>(const ap_private& op) const { - return _AP_S ? 
sgt(op) : ugt(op); - } - - template - INLINE bool operator>(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - if (_AP_S == _AP_S2) - return _AP_S ? lhs.sgt(rhs) : lhs.ugt(rhs); - else if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - } - - /// Bit and Part Select - //-------------------------------------------------------------- - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - const_cast*>(this), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - (const_cast*>(this)), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> range( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> operator()( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> range( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return _private_range_ref<_AP_W, _AP_S>(const_cast(this), 
Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> operator()( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> operator[](int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[](int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> bit(int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> bit(const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> bit(int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> bit( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(ap_private<_AP_W2, _AP_S2>& a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(const ap_private<_AP_W2, _AP_S2>& a2) 
const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(_private_range_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, 
ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(_private_bit_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, -// a2); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> 
> -// operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, -// a2); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast&>( -// a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,( -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); -// } - - INLINE ap_private<_AP_W, false> get() const { - ap_private<_AP_W, false> ret(*this); - return ret; - } - - template - INLINE void set(const ap_private<_AP_W3, false>& val) { - operator=(ap_private<_AP_W3, _AP_S>(val)); - } - - /// - /// @name Value Tests - /// - /// This tests the high bit of this ap_private to determine if it is set. - /// @returns true if this ap_private is negative, false otherwise - /// @brief Determine sign of this ap_private. - INLINE bool isNegative() const { - // just for get rid of warnings - enum { shift = (_AP_W - APINT_BITS_PER_WORD * (_AP_N - 1) - 1) }; - static const uint64_t mask = 1ULL << (shift); - return _AP_S && (pVal[_AP_N - 1] & mask); - } - - /// This tests the high bit of the ap_private to determine if it is unset. - /// @brief Determine if this ap_private Value is positive (not negative). - INLINE bool isPositive() const { return !isNegative(); } - - /// This tests if the value of this ap_private is strictly positive (> 0). 
- /// @returns true if this ap_private is Positive and not zero. - /// @brief Determine if this ap_private Value is strictly positive. - INLINE bool isStrictlyPositive() const { - return isPositive() && (*this) != 0; - } - - /// This checks to see if the value has all bits of the ap_private are set or - /// not. - /// @brief Determine if all bits are set - INLINE bool isAllOnesValue() const { return countPopulation() == _AP_W; } - - /// This checks to see if the value of this ap_private is the maximum unsigned - /// value for the ap_private's bit width. - /// @brief Determine if this is the largest unsigned value. - INLINE bool isMaxValue() const { return countPopulation() == _AP_W; } - - /// This checks to see if the value of this ap_private is the maximum signed - /// value for the ap_private's bit width. - /// @brief Determine if this is the largest signed value. - INLINE bool isMaxSignedValue() const { - return !isNegative() && countPopulation() == _AP_W - 1; - } - - /// This checks to see if the value of this ap_private is the minimum unsigned - /// value for the ap_private's bit width. - /// @brief Determine if this is the smallest unsigned value. - INLINE bool isMinValue() const { return countPopulation() == 0; } - - /// This checks to see if the value of this ap_private is the minimum signed - /// value for the ap_private's bit width. - /// @brief Determine if this is the smallest signed value. - INLINE bool isMinSignedValue() const { - return isNegative() && countPopulation() == 1; - } - - /// This function returns a pointer to the internal storage of the ap_private. - /// This is useful for writing out the ap_private in binary form without any - /// conversions. - INLINE const uint64_t* getRawData() const { return &pVal[0]; } - - // Square Root - this method computes and returns the square root of "this". - // Three mechanisms are used for computation. For small values (<= 5 bits), - // a table lookup is done. This gets some performance for common cases. 
For - // values using less than 52 bits, the value is converted to double and then - // the libc sqrt function is called. The result is rounded and then converted - // back to a uint64_t which is then used to construct the result. Finally, - // the Babylonian method for computing square roots is used. - INLINE ap_private sqrt() const { - // Determine the magnitude of the value. - uint32_t magnitude = getActiveBits(); - - // Use a fast table for some small values. This also gets rid of some - // rounding errors in libc sqrt for small values. - if (magnitude <= 5) { - static const uint8_t results[32] = { - /* 0 */ 0, - /* 1- 2 */ 1, 1, - /* 3- 6 */ 2, 2, 2, 2, - /* 7-12 */ 3, 3, 3, 3, 3, 3, - /* 13-20 */ 4, 4, 4, 4, 4, 4, 4, 4, - /* 21-30 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 31 */ 6}; - return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ results[get_VAL()]); - } - - // If the magnitude of the value fits in less than 52 bits (the precision of - // an IEEE double precision floating point value), then we can use the - // libc sqrt function which will probably use a hardware sqrt computation. - // This should be faster than the algorithm below. - if (magnitude < 52) { -#ifdef _MSC_VER - // Amazingly, VC++ doesn't have round(). - return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ - uint64_t(::sqrt(double(get_VAL()))) + - 0.5); -#else - return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ - uint64_t( - ::round(::sqrt(double(get_VAL()))))); -#endif - } - - // Okay, all the short cuts are exhausted. We must compute it. The following - // is a classical Babylonian method for computing the square root. This code - // was adapted to APINt from a wikipedia article on such computations. - // See http://www.wikipedia.org/ and go to the page named - // Calculate_an_integer_square_root. 
- uint32_t nbits = BitWidth, i = 4; - ap_private<_AP_W, _AP_S> testy(16); - ap_private<_AP_W, _AP_S> x_old(/*BitWidth,*/ 1); - ap_private<_AP_W, _AP_S> x_new(0); - ap_private<_AP_W, _AP_S> two(/*BitWidth,*/ 2); - - // Select a good starting value using binary logarithms. - for (;; i += 2, testy = testy.shl(2)) - if (i >= nbits || this->ule(testy)) { - x_old = x_old.shl(i / 2); - break; - } - - // Use the Babylonian method to arrive at the integer square root: - for (;;) { - x_new = (this->udiv(x_old) + x_old).udiv(two); - if (x_old.ule(x_new)) break; - x_old = x_new; - } - - // Make sure we return the closest approximation - // NOTE: The rounding calculation below is correct. It will produce an - // off-by-one discrepancy with results from pari/gp. That discrepancy has - // been - // determined to be a rounding issue with pari/gp as it begins to use a - // floating point representation after 192 bits. There are no discrepancies - // between this algorithm and pari/gp for bit widths < 192 bits. - ap_private<_AP_W, _AP_S> square(x_old * x_old); - ap_private<_AP_W, _AP_S> nextSquare((x_old + 1) * (x_old + 1)); - if (this->ult(square)) - return x_old; - else if (this->ule(nextSquare)) { - ap_private<_AP_W, _AP_S> midpoint((nextSquare - square).udiv(two)); - ap_private<_AP_W, _AP_S> offset(*this - square); - if (offset.ult(midpoint)) - return x_old; - else - return x_old + 1; - } else - assert(0 && "Error in ap_private<_AP_W, _AP_S>::sqrt computation"); - return x_old + 1; - } - - /// - /// @Assignment Operators - /// - /// @returns *this after assignment of RHS. - /// @brief Copy assignment operator. 
- INLINE ap_private& operator=(const ap_private& RHS) { - if (this != &RHS) memcpy(pVal, RHS.get_pVal(), _AP_N * APINT_WORD_SIZE); - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator=(const volatile ap_private& RHS) { - if (this != &RHS) - for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); - clearUnusedBits(); - return *this; - } - INLINE void operator=(const ap_private& RHS) volatile { - if (this != &RHS) - for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); - clearUnusedBits(); - } - INLINE void operator=(const volatile ap_private& RHS) volatile { - if (this != &RHS) - for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); - clearUnusedBits(); - } - - template - INLINE ap_private& operator=(const ap_private<_AP_W1, _AP_S1>& RHS) { - if (_AP_S1) - cpSextOrTrunc(RHS); - else - cpZextOrTrunc(RHS); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const volatile ap_private<_AP_W1, _AP_S1>& RHS) { - if (_AP_S1) - cpSextOrTrunc(RHS); - else - cpZextOrTrunc(RHS); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - *this = ap_private<_AP_W2, false>(op2); - return *this; - } - -#if 0 - template - INLINE ap_private& operator=(const ap_private<_AP_W1, _AP_S1, true>& RHS) { - static const uint64_t that_sign_ext_mask = (_AP_W1==APINT_BITS_PER_WORD)?0:~0ULL>>(_AP_W1%APINT_BITS_PER_WORD)<<(_AP_W1%APINT_BITS_PER_WORD); - if (RHS.isNegative()) { - pVal[0] = RHS.get_VAL() | that_sign_ext_mask; - memset(pVal+1,~0, APINT_WORD_SIZE*(_AP_N-1)); - } else { - pVal[0] = RHS.get_VAL(); - memset(pVal+1, 0, APINT_WORD_SIZE*(_AP_N-1)); - } - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const volatile ap_private<_AP_W1, _AP_S1, true>& RHS) { - static const uint64_t that_sign_ext_mask = (_AP_W1==APINT_BITS_PER_WORD)?0:~0ULL>>(_AP_W1%APINT_BITS_PER_WORD)<<(_AP_W1%APINT_BITS_PER_WORD); - if 
(RHS.isNegative()) { - pVal[0] = RHS.get_VAL() | that_sign_ext_mask; - memset(pVal+1,~0, APINT_WORD_SIZE*(_AP_N-1)); - } else { - pVal[0] = RHS.get_VAL(); - memset(pVal+1, 0, APINT_WORD_SIZE*(_AP_N-1)); - } - clearUnusedBits(); - return *this; - } -#endif - -/// from all c types. -#define ASSIGN_OP_FROM_INT(C_TYPE, _AP_W2, _AP_S2) \ - INLINE ap_private& operator=(const C_TYPE rhs) { \ - ap_private<(_AP_W2), (_AP_S2)> tmp = rhs; \ - operator=(tmp); \ - return *this; \ - } - - ASSIGN_OP_FROM_INT(bool, 1, false) - ASSIGN_OP_FROM_INT(char, 8, CHAR_IS_SIGNED) - ASSIGN_OP_FROM_INT(signed char, 8, true) - ASSIGN_OP_FROM_INT(unsigned char, 8, false) - ASSIGN_OP_FROM_INT(short, sizeof(short) * 8, true) - ASSIGN_OP_FROM_INT(unsigned short, sizeof(unsigned short) * 8, false) - ASSIGN_OP_FROM_INT(int, sizeof(int) * 8, true) - ASSIGN_OP_FROM_INT(unsigned int, sizeof(unsigned int) * 8, false) - ASSIGN_OP_FROM_INT(long, sizeof(long) * 8, true) - ASSIGN_OP_FROM_INT(unsigned long, sizeof(unsigned long) * 8, false) - ASSIGN_OP_FROM_INT(ap_slong, sizeof(ap_slong) * 8, true) - ASSIGN_OP_FROM_INT(ap_ulong, sizeof(ap_ulong) * 8, false) -#undef ASSIGN_OP_FROM_INT - - /// from c string. - // XXX this is a must, to prevent pointer being converted to bool. - INLINE ap_private& operator=(const char* s) { - ap_private tmp(s); // XXX direct initialization, as ctor is explicit. - operator=(tmp); - return *this; - } - - /// - /// @name Unary Operators - /// - /// @returns a new ap_private value representing *this incremented by one - /// @brief Postfix increment operator. - INLINE const ap_private operator++(int) { - ap_private API(*this); - ++(*this); - return API; - } - - /// @returns *this incremented by one - /// @brief Prefix increment operator. - INLINE ap_private& operator++() { - ap_private_ops::add_1(pVal, pVal, _AP_N, 1); - clearUnusedBits(); - return *this; - } - - /// @returns a new ap_private representing *this decremented by one. - /// @brief Postfix decrement operator. 
- INLINE const ap_private operator--(int) { - ap_private API(*this); - --(*this); - return API; - } - - /// @returns *this decremented by one. - /// @brief Prefix decrement operator. - INLINE ap_private& operator--() { - ap_private_ops::sub_1(pVal, _AP_N, 1); - clearUnusedBits(); - return *this; - } - - /// Performs a bitwise complement operation on this ap_private. - /// @returns an ap_private that is the bitwise complement of *this - /// @brief Unary bitwise complement operator. - INLINE ap_private<_AP_W + !_AP_S, true> operator~() const { - ap_private<_AP_W + !_AP_S, true> Result(*this); - Result.flip(); - return Result; - } - - /// Negates *this using two's complement logic. - /// @returns An ap_private value representing the negation of *this. - /// @brief Unary negation operator - INLINE typename RType<1, false>::minus operator-() const { - return ap_private<1, false>(0) - (*this); - } - - /// Performs logical negation operation on this ap_private. - /// @returns true if *this is zero, false otherwise. - /// @brief Logical negation operator. 
- INLINE bool operator!() const { - for (int i = 0; i < _AP_N; ++i) - if (pVal[i]) return false; - return true; - } - - template - INLINE ap_private<_AP_W, _AP_S || _AP_S1> And( - const ap_private<_AP_W, _AP_S1>& RHS) const { - return this->operator&(RHS); - } - template - INLINE ap_private Or(const ap_private<_AP_W, _AP_S1>& RHS) const { - return this->operator|(RHS); - } - template - INLINE ap_private Xor(const ap_private<_AP_W, _AP_S1>& RHS) const { - return this->operator^(RHS); - } - - INLINE ap_private Mul(const ap_private& RHS) const { - ap_private Result(*this); - Result *= RHS; - return Result; - } - - INLINE ap_private Add(const ap_private& RHS) const { - ap_private Result(0); - ap_private_ops::add(Result.get_pVal(), pVal, RHS.get_pVal(), _AP_N, _AP_N, - _AP_N, _AP_S, _AP_S); - Result.clearUnusedBits(); - return Result; - } - - INLINE ap_private Sub(const ap_private& RHS) const { - ap_private Result(0); - ap_private_ops::sub(Result.get_pVal(), pVal, RHS.get_pVal(), _AP_N, _AP_N, - _AP_N, _AP_S, _AP_S); - Result.clearUnusedBits(); - return Result; - } - - /// Arithmetic right-shift this ap_private by shiftAmt. - /// @brief Arithmetic right-shift function. - INLINE ap_private ashr(uint32_t shiftAmt) const { - assert(shiftAmt <= BitWidth && "Invalid shift amount, too big"); - // Handle a degenerate case - if (shiftAmt == 0) return ap_private(*this); - - // If all the bits were shifted out, the result is, technically, undefined. - // We return -1 if it was negative, 0 otherwise. We check this early to - // avoid - // issues in the algorithm below. - if (shiftAmt == BitWidth) { - if (isNegative()) - return ap_private(-1); - else - return ap_private(0); - } - - // Create some space for the result. 
- ap_private Retval(0); - uint64_t* val = Retval.get_pVal(); - - // Compute some values needed by the following shift algorithms - uint32_t wordShift = - shiftAmt % APINT_BITS_PER_WORD; // bits to shift per word - uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; // word offset for shift - uint32_t breakWord = _AP_N - 1 - offset; // last word affected - uint32_t bitsInWord = whichBit(BitWidth); // how many bits in last word? - if (bitsInWord == 0) bitsInWord = APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - // Move the words containing significant bits - for (uint32_t i = 0; i <= breakWord; ++i) - val[i] = pVal[i + offset]; // move whole word - - // Adjust the top significant word for sign bit fill, if negative - if (isNegative()) - if (bitsInWord < APINT_BITS_PER_WORD) - val[breakWord] |= ~0ULL << (bitsInWord); // set high bits - } else { - // Shift the low order words - for (uint32_t i = 0; i < breakWord; ++i) { - // This combines the shifted corresponding word with the low bits from - // the next word (shifted into this word's high bits). - val[i] = ((pVal[i + offset]) >> (wordShift)); - val[i] |= ((pVal[i + offset + 1]) << (APINT_BITS_PER_WORD - wordShift)); - } - - // Shift the break word. In this case there are no bits from the next word - // to include in this word. - val[breakWord] = (pVal[breakWord + offset]) >> (wordShift); - - // Deal with sign extenstion in the break word, and possibly the word - // before - // it. - if (isNegative()) { - if (wordShift > bitsInWord) { - if (breakWord > 0) - val[breakWord - 1] |= - ~0ULL << (APINT_BITS_PER_WORD - (wordShift - bitsInWord)); - val[breakWord] |= ~0ULL; - } else - val[breakWord] |= (~0ULL << (bitsInWord - wordShift)); - } - } - - // Remaining words are 0 or -1, just assign them. - uint64_t fillValue = (isNegative() ? 
~0ULL : 0); - for (int i = breakWord + 1; i < _AP_N; ++i) val[i] = fillValue; - Retval.clearUnusedBits(); - return Retval; - } - - /// Logical right-shift this ap_private by shiftAmt. - /// @brief Logical right-shift function. - INLINE ap_private lshr(uint32_t shiftAmt) const { - // If all the bits were shifted out, the result is 0. This avoids issues - // with shifting by the size of the integer type, which produces undefined - // results. We define these "undefined results" to always be 0. - if (shiftAmt == BitWidth) return ap_private(0); - - // If none of the bits are shifted out, the result is *this. This avoids - // issues with shifting byt he size of the integer type, which produces - // undefined results in the code below. This is also an optimization. - if (shiftAmt == 0) return ap_private(*this); - - // Create some space for the result. - ap_private Retval(0); - uint64_t* val = Retval.get_pVal(); - - // If we are shifting less than a word, compute the shift with a simple - // carry - if (shiftAmt < APINT_BITS_PER_WORD) { - uint64_t carry = 0; - for (int i = _AP_N - 1; i >= 0; --i) { - val[i] = ((pVal[i]) >> (shiftAmt)) | carry; - carry = (pVal[i]) << (APINT_BITS_PER_WORD - shiftAmt); - } - Retval.clearUnusedBits(); - return Retval; - } - - // Compute some values needed by the remaining shift algorithms - uint32_t wordShift = shiftAmt % APINT_BITS_PER_WORD; - uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - for (uint32_t i = 0; i < _AP_N - offset; ++i) val[i] = pVal[i + offset]; - for (uint32_t i = _AP_N - offset; i < _AP_N; i++) val[i] = 0; - Retval.clearUnusedBits(); - return Retval; - } - - // Shift the low order words - uint32_t breakWord = _AP_N - offset - 1; - for (uint32_t i = 0; i < breakWord; ++i) - val[i] = ((pVal[i + offset]) >> (wordShift)) | - ((pVal[i + offset + 1]) << (APINT_BITS_PER_WORD - wordShift)); - // Shift the break word. 
- val[breakWord] = (pVal[breakWord + offset]) >> (wordShift); - - // Remaining words are 0 - for (int i = breakWord + 1; i < _AP_N; ++i) val[i] = 0; - Retval.clearUnusedBits(); - return Retval; - } - - /// Left-shift this ap_private by shiftAmt. - /// @brief Left-shift function. - INLINE ap_private shl(uint32_t shiftAmt) const { - assert(shiftAmt <= BitWidth && "Invalid shift amount, too big"); - // If all the bits were shifted out, the result is 0. This avoids issues - // with shifting by the size of the integer type, which produces undefined - // results. We define these "undefined results" to always be 0. - if (shiftAmt == BitWidth) return ap_private(0); - - // If none of the bits are shifted out, the result is *this. This avoids a - // lshr by the words size in the loop below which can produce incorrect - // results. It also avoids the expensive computation below for a common - // case. - if (shiftAmt == 0) return ap_private(*this); - - // Create some space for the result. - ap_private Retval(0); - uint64_t* val = Retval.get_pVal(); - // If we are shifting less than a word, do it the easy way - if (shiftAmt < APINT_BITS_PER_WORD) { - uint64_t carry = 0; - for (int i = 0; i < _AP_N; i++) { - val[i] = ((pVal[i]) << (shiftAmt)) | carry; - carry = (pVal[i]) >> (APINT_BITS_PER_WORD - shiftAmt); - } - Retval.clearUnusedBits(); - return Retval; - } - - // Compute some values needed by the remaining shift algorithms - uint32_t wordShift = shiftAmt % APINT_BITS_PER_WORD; - uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - for (uint32_t i = 0; i < offset; i++) val[i] = 0; - for (int i = offset; i < _AP_N; i++) val[i] = pVal[i - offset]; - Retval.clearUnusedBits(); - return Retval; - } - - // Copy whole words from this to Result. 
- uint32_t i = _AP_N - 1; - for (; i > offset; --i) - val[i] = (pVal[i - offset]) << (wordShift) | - (pVal[i - offset - 1]) >> (APINT_BITS_PER_WORD - wordShift); - val[offset] = (pVal[0]) << (wordShift); - for (i = 0; i < offset; ++i) val[i] = 0; - Retval.clearUnusedBits(); - return Retval; - } - - INLINE ap_private rotl(uint32_t rotateAmt) const { - if (rotateAmt == 0) return ap_private(*this); - // Don't get too fancy, just use existing shift/or facilities - ap_private hi(*this); - ap_private lo(*this); - hi.shl(rotateAmt); - lo.lshr(BitWidth - rotateAmt); - return hi | lo; - } - - INLINE ap_private rotr(uint32_t rotateAmt) const { - if (rotateAmt == 0) return ap_private(*this); - // Don't get too fancy, just use existing shift/or facilities - ap_private hi(*this); - ap_private lo(*this); - lo.lshr(rotateAmt); - hi.shl(BitWidth - rotateAmt); - return hi | lo; - } - - /// Perform an unsigned divide operation on this ap_private by RHS. Both this - /// and - /// RHS are treated as unsigned quantities for purposes of this division. - /// @returns a new ap_private value containing the division result - /// @brief Unsigned division operation. - INLINE ap_private udiv(const ap_private& RHS) const { - // Get some facts about the LHS and RHS number of bits and words - uint32_t rhsBits = RHS.getActiveBits(); - uint32_t rhsWords = !rhsBits ? 0 : (whichWord(rhsBits - 1) + 1); - assert(rhsWords && "Divided by zero???"); - uint32_t lhsBits = this->getActiveBits(); - uint32_t lhsWords = !lhsBits ? 
0 : (whichWord(lhsBits - 1) + 1); - - // Deal with some degenerate cases - if (!lhsWords) - // 0 / X ===> 0 - return ap_private(0); - else if (lhsWords < rhsWords || this->ult(RHS)) { - // X / Y ===> 0, iff X < Y - return ap_private(0); - } else if (*this == RHS) { - // X / X ===> 1 - return ap_private(1); - } else if (lhsWords == 1 && rhsWords == 1) { - // All high words are zero, just use native divide - return ap_private(this->pVal[0] / RHS.get_pVal(0)); - } - - // We have to compute it the hard way. Invoke the Knuth divide algorithm. - ap_private Quotient(0); // to hold result. - ap_private_ops::divide(*this, lhsWords, RHS, rhsWords, &Quotient, - (ap_private*)0); - return Quotient; - } - - /// Signed divide this ap_private by ap_private RHS. - /// @brief Signed division function for ap_private. - INLINE ap_private sdiv(const ap_private& RHS) const { - if (isNegative()) - if (RHS.isNegative()) - return (-(*this)).udiv(-RHS); - else - return -((-(*this)).udiv(RHS)); - else if (RHS.isNegative()) - return -(this->udiv((ap_private)(-RHS))); - return this->udiv(RHS); - } - - /// Perform an unsigned remainder operation on this ap_private with RHS being - /// the - /// divisor. Both this and RHS are treated as unsigned quantities for purposes - /// of this operation. Note that this is a true remainder operation and not - /// a modulo operation because the sign follows the sign of the dividend - /// which is *this. - /// @returns a new ap_private value containing the remainder result - /// @brief Unsigned remainder operation. - INLINE ap_private urem(const ap_private& RHS) const { - // Get some facts about the LHS - uint32_t lhsBits = getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); - - // Get some facts about the RHS - uint32_t rhsBits = RHS.getActiveBits(); - uint32_t rhsWords = !rhsBits ? 
0 : (whichWord(rhsBits - 1) + 1); - assert(rhsWords && "Performing remainder operation by zero ???"); - - // Check the degenerate cases - if (lhsWords == 0) { - // 0 % Y ===> 0 - return ap_private(0); - } else if (lhsWords < rhsWords || this->ult(RHS)) { - // X % Y ===> X, iff X < Y - return *this; - } else if (*this == RHS) { - // X % X == 0; - return ap_private(0); - } else if (lhsWords == 1) { - // All high words are zero, just use native remainder - return ap_private(pVal[0] % RHS.get_pVal(0)); - } - - // We have to compute it the hard way. Invoke the Knuth divide algorithm. - ap_private Remainder(0); - ap_private_ops::divide(*this, lhsWords, RHS, rhsWords, (ap_private*)(0), - &Remainder); - return Remainder; - } - - INLINE ap_private urem(uint64_t RHS) const { - // Get some facts about the LHS - uint32_t lhsBits = getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); - // Get some facts about the RHS - uint32_t rhsWords = 1; //! rhsBits ? 0 : (ap_private<_AP_W, - //! _AP_S>::whichWord(rhsBits - 1) + 1); - assert(rhsWords && "Performing remainder operation by zero ???"); - // Check the degenerate cases - if (lhsWords == 0) { - // 0 % Y ===> 0 - return ap_private(0); - } else if (lhsWords < rhsWords || this->ult(RHS)) { - // X % Y ===> X, iff X < Y - return *this; - } else if (*this == RHS) { - // X % X == 0; - return ap_private(0); - } else if (lhsWords == 1) { - // All high words are zero, just use native remainder - return ap_private(pVal[0] % RHS); - } - - // We have to compute it the hard way. Invoke the Knuth divide algorithm. - ap_private Remainder(0); - divide(*this, lhsWords, RHS, (ap_private*)(0), &Remainder); - return Remainder; - } - - /// Signed remainder operation on ap_private. - /// @brief Function for signed remainder operation. 
- INLINE ap_private srem(const ap_private& RHS) const { - if (isNegative()) { - ap_private lhs = -(*this); - if (RHS.isNegative()) { - ap_private rhs = -RHS; - return -(lhs.urem(rhs)); - } else - return -(lhs.urem(RHS)); - } else if (RHS.isNegative()) { - ap_private rhs = -RHS; - return this->urem(rhs); - } - return this->urem(RHS); - } - - /// Signed remainder operation on ap_private. - /// @brief Function for signed remainder operation. - INLINE ap_private srem(int64_t RHS) const { - if (isNegative()) - if (RHS < 0) - return -((-(*this)).urem(-RHS)); - else - return -((-(*this)).urem(RHS)); - else if (RHS < 0) - return this->urem(-RHS); - return this->urem(RHS); - } - - /// Compares this ap_private with RHS for the validity of the equality - /// relationship. - /// @returns true if *this == Val - /// @brief Equality comparison. - template - INLINE bool eq(const ap_private<_AP_W, _AP_S1>& RHS) const { - return (*this) == RHS; - } - - /// Compares this ap_private with RHS for the validity of the inequality - /// relationship. - /// @returns true if *this != Val - /// @brief Inequality comparison - template - INLINE bool ne(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !((*this) == RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the less-than relationship. - /// @returns true if *this < RHS when both are considered unsigned. - /// @brief Unsigned less than comparison - template - INLINE bool ult(const ap_private<_AP_W, _AP_S1>& RHS) const { - // Get active bit length of both operands - uint32_t n1 = getActiveBits(); - uint32_t n2 = RHS.getActiveBits(); - - // If magnitude of LHS is less than RHS, return true. - if (n1 < n2) return true; - - // If magnitude of RHS is greather than LHS, return false. 
- if (n2 < n1) return false; - - // If they bot fit in a word, just compare the low order word - if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) - return pVal[0] < RHS.get_pVal(0); - - // Otherwise, compare all words - uint32_t topWord = whichWord(AESL_std::max(n1, n2) - 1); - for (int i = topWord; i >= 0; --i) { - if (pVal[i] > RHS.get_pVal(i)) return false; - if (pVal[i] < RHS.get_pVal(i)) return true; - } - return false; - } - - INLINE bool ult(uint64_t RHS) const { - // Get active bit length of both operands - uint32_t n1 = getActiveBits(); - uint32_t n2 = - 64 - ap_private_ops::CountLeadingZeros_64(RHS); // RHS.getActiveBits(); - - // If magnitude of LHS is less than RHS, return true. - if (n1 < n2) return true; - - // If magnitude of RHS is greather than LHS, return false. - if (n2 < n1) return false; - - // If they bot fit in a word, just compare the low order word - if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) - return pVal[0] < RHS; - assert(0); - } - - template - INLINE bool slt(const ap_private<_AP_W, _AP_S1>& RHS) const { - ap_private lhs(*this); - ap_private<_AP_W, _AP_S1> rhs(RHS); - bool lhsNeg = isNegative(); - bool rhsNeg = rhs.isNegative(); - if (lhsNeg) { - // Sign bit is set so perform two's complement to make it positive - lhs.flip(); - lhs++; - } - if (rhsNeg) { - // Sign bit is set so perform two's complement to make it positive - rhs.flip(); - rhs++; - } - - // Now we have unsigned values to compare so do the comparison if necessary - // based on the negativeness of the values. - if (lhsNeg) - if (rhsNeg) - return lhs.ugt(rhs); - else - return true; - else if (rhsNeg) - return false; - else - return lhs.ult(rhs); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered unsigned. 
- /// @brief Unsigned less or equal comparison - template - INLINE bool ule(const ap_private<_AP_W, _AP_S1>& RHS) const { - return ult(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered signed. - /// @brief Signed less or equal comparison - template - INLINE bool sle(const ap_private<_AP_W, _AP_S1>& RHS) const { - return slt(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered unsigned. - /// @brief Unsigned greather than comparison - template - INLINE bool ugt(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !ult(RHS) && !eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered signed. - /// @brief Signed greather than comparison - template - INLINE bool sgt(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !slt(RHS) && !eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the greater-or-equal relationship. - /// @returns true if *this >= RHS when both are considered unsigned. - /// @brief Unsigned greater or equal comparison - template - INLINE bool uge(const ap_private<_AP_W, _AP_S>& RHS) const { - return !ult(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the greater-or-equal relationship. - /// @returns true if *this >= RHS when both are considered signed. - /// @brief Signed greather or equal comparison - template - INLINE bool sge(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !slt(RHS); - } - - // Sign extend to a new width. 
- template - INLINE void cpSext(const ap_private<_AP_W1, _AP_S1>& that) { - assert(_AP_W1 < BitWidth && "Invalid ap_private SignExtend request"); - assert(_AP_W1 <= MAX_INT_BITS && "Too many bits"); - // If the sign bit isn't set, this is the same as zext. - if (!that.isNegative()) { - cpZext(that); - return; - } - - // The sign bit is set. First, get some facts - enum { wordBits = _AP_W1 % APINT_BITS_PER_WORD }; - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; - // Mask the high order word appropriately - if (_AP_N1 == _AP_N) { - enum { newWordBits = _AP_W % APINT_BITS_PER_WORD }; - // The extension is contained to the wordsBefore-1th word. - static const uint64_t mask = wordBits ? (~0ULL << (wordBits)) : 0ULL; - for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i); - pVal[_AP_N - 1] |= mask; - return; - } - - enum { newWordBits = _AP_W % APINT_BITS_PER_WORD }; - // The extension is contained to the wordsBefore-1th word. - static const uint64_t mask = wordBits ? (~0ULL << (wordBits)) : 0ULL; - int i; - for (i = 0; i < _AP_N1; ++i) pVal[i] = that.get_pVal(i); - pVal[i - 1] |= mask; - for (; i < _AP_N - 1; i++) pVal[i] = ~0ULL; - pVal[i] = ~0ULL; - clearUnusedBits(); - return; - } - - // Zero extend to a new width. 
- template - INLINE void cpZext(const ap_private<_AP_W1, _AP_S1>& that) { - assert(_AP_W1 < BitWidth && "Invalid ap_private ZeroExtend request"); - assert(_AP_W1 <= MAX_INT_BITS && "Too many bits"); - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; - int i = 0; - for (; i < _AP_N1; ++i) pVal[i] = that.get_pVal(i); - for (; i < _AP_N; ++i) pVal[i] = 0; - clearUnusedBits(); - } - - template - INLINE void cpZextOrTrunc(const ap_private<_AP_W1, _AP_S1>& that) { - if (BitWidth > _AP_W1) - cpZext(that); - else { - for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i); - clearUnusedBits(); - } - } - - template - INLINE void cpSextOrTrunc(const ap_private<_AP_W1, _AP_S1>& that) { - if (BitWidth > _AP_W1) - cpSext(that); - else { - for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i); - clearUnusedBits(); - } - } - - /// @} - /// @name Value Characterization Functions - /// @{ - - /// @returns the total number of bits. - INLINE uint32_t getBitWidth() const { return BitWidth; } - - /// Here one word's bitwidth equals to that of uint64_t. - /// @returns the number of words to hold the integer value of this ap_private. - /// @brief Get the number of words. - INLINE uint32_t getNumWords() const { - return (BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD; - } - - /// This function returns the number of active bits which is defined as the - /// bit width minus the number of leading zeros. This is used in several - /// computations to see how "wide" the value is. - /// @brief Compute the number of active bits in the value - INLINE uint32_t getActiveBits() const { - uint32_t bits = BitWidth - countLeadingZeros(); - return bits ? bits : 1; - } - - /// This method attempts to return the value of this ap_private as a zero - /// extended - /// uint64_t. The bitwidth must be <= 64 or the value must fit within a - /// uint64_t. Otherwise an assertion will result. 
- /// @brief Get zero extended value - INLINE uint64_t getZExtValue() const { - assert(getActiveBits() <= 64 && "Too many bits for uint64_t"); - return *pVal; - } - - /// This method attempts to return the value of this ap_private as a sign - /// extended - /// int64_t. The bit width must be <= 64 or the value must fit within an - /// int64_t. Otherwise an assertion will result. - /// @brief Get sign extended value - INLINE int64_t getSExtValue() const { - assert(getActiveBits() <= 64 && "Too many bits for int64_t"); - return int64_t(pVal[0]); - } - - /// This method determines how many bits are required to hold the ap_private - /// equivalent of the string given by \p str of length \p slen. - /// @brief Get bits required for string value. - INLINE static uint32_t getBitsNeeded(const char* str, uint32_t slen, - uint8_t radix) { - assert(str != 0 && "Invalid value string"); - assert(slen > 0 && "Invalid string length"); - - // Each computation below needs to know if its negative - uint32_t isNegative = str[0] == '-'; - if (isNegative) { - slen--; - str++; - } - // For radixes of power-of-two values, the bits required is accurately and - // easily computed - if (radix == 2) return slen + isNegative; - if (radix == 8) return slen * 3 + isNegative; - if (radix == 16) return slen * 4 + isNegative; - - // Otherwise it must be radix == 10, the hard case - assert(radix == 10 && "Invalid radix"); - - // Convert to the actual binary value. - // ap_private<_AP_W, _AP_S> tmp(sufficient, str, slen, radix); - - // Compute how many bits are required. - // return isNegative + tmp.logBase2() + 1; - return isNegative + slen * 4; - } - - /// countLeadingZeros - This function is an ap_private version of the - /// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number - /// of zeros from the most significant bit to the first one bit. - /// @returns BitWidth if the value is zero. 
- /// @returns the number of zeros from the most significant bit to the first - /// one bits. - INLINE uint32_t countLeadingZeros() const { - enum { - msw_bits = (BitWidth % APINT_BITS_PER_WORD) - ? (BitWidth % APINT_BITS_PER_WORD) - : APINT_BITS_PER_WORD, - excessBits = APINT_BITS_PER_WORD - msw_bits - }; - uint32_t Count = ap_private_ops::CountLeadingZeros_64(pVal[_AP_N - 1]); - if (Count >= excessBits) Count -= excessBits; - if (!pVal[_AP_N - 1]) { - for (int i = _AP_N - 1; i; --i) { - if (!pVal[i - 1]) - Count += APINT_BITS_PER_WORD; - else { - Count += ap_private_ops::CountLeadingZeros_64(pVal[i - 1]); - break; - } - } - } - return Count; - } - - /// countLeadingOnes - This function counts the number of contiguous 1 bits - /// in the high order bits. The count stops when the first 0 bit is reached. - /// @returns 0 if the high order bit is not set - /// @returns the number of 1 bits from the most significant to the least - /// @brief Count the number of leading one bits. - INLINE uint32_t countLeadingOnes() const { - if (isSingleWord()) - return countLeadingOnes_64(get_VAL(), APINT_BITS_PER_WORD - BitWidth); - - uint32_t highWordBits = BitWidth % APINT_BITS_PER_WORD; - uint32_t shift = - (highWordBits == 0 ? 0 : APINT_BITS_PER_WORD - highWordBits); - int i = _AP_N - 1; - uint32_t Count = countLeadingOnes_64(get_pVal(i), shift); - if (Count == highWordBits) { - for (i--; i >= 0; --i) { - if (get_pVal(i) == ~0ULL) - Count += APINT_BITS_PER_WORD; - else { - Count += countLeadingOnes_64(get_pVal(i), 0); - break; - } - } - } - return Count; - } - - /// countTrailingZeros - This function is an ap_private version of the - /// countTrailingZoers_{32,64} functions in MathExtras.h. It counts - /// the number of zeros from the least significant bit to the first set bit. - /// @returns BitWidth if the value is zero. - /// @returns the number of zeros from the least significant bit to the first - /// one bit. - /// @brief Count the number of trailing zero bits. 
- INLINE uint32_t countTrailingZeros() const { - uint32_t Count = 0; - uint32_t i = 0; - for (; i < _AP_N && get_pVal(i) == 0; ++i) Count += APINT_BITS_PER_WORD; - if (i < _AP_N) Count += ap_private_ops::CountTrailingZeros_64(get_pVal(i)); - return AESL_std::min(Count, BitWidth); - } - /// countPopulation - This function is an ap_private version of the - /// countPopulation_{32,64} functions in MathExtras.h. It counts the number - /// of 1 bits in the ap_private value. - /// @returns 0 if the value is zero. - /// @returns the number of set bits. - /// @brief Count the number of bits set. - INLINE uint32_t countPopulation() const { - uint32_t Count = 0; - for (int i = 0; i < _AP_N - 1; ++i) - Count += ap_private_ops::CountPopulation_64(pVal[i]); - Count += ap_private_ops::CountPopulation_64(pVal[_AP_N - 1] & mask); - return Count; - } - - /// @} - /// @name Conversion Functions - /// @ - - /// This is used internally to convert an ap_private to a string. - /// @brief Converts an ap_private to a std::string - INLINE std::string toString(uint8_t radix, bool wantSigned) const; - - /// Considers the ap_private to be unsigned and converts it into a string in - /// the - /// radix given. The radix can be 2, 8, 10 or 16. - /// @returns a character interpretation of the ap_private - /// @brief Convert unsigned ap_private to string representation. - INLINE std::string toStringUnsigned(uint8_t radix = 10) const { - return toString(radix, false); - } - - /// Considers the ap_private to be unsigned and converts it into a string in - /// the - /// radix given. The radix can be 2, 8, 10 or 16. - /// @returns a character interpretation of the ap_private - /// @brief Convert unsigned ap_private to string representation. - INLINE std::string toStringSigned(uint8_t radix = 10) const { - return toString(radix, true); - } - - /// @brief Converts this ap_private to a double value. 
- INLINE double roundToDouble(bool isSigned) const { - // Handle the simple case where the value is contained in one uint64_t. - if (isSingleWord() || getActiveBits() <= APINT_BITS_PER_WORD) { - uint64_t val = pVal[0]; - if (isSigned) { - int64_t sext = ((int64_t(val)) << (64 - BitWidth)) >> (64 - BitWidth); - return double(sext); - } else - return double(val); - } - - // Determine if the value is negative. - bool isNeg = isSigned ? (*this)[BitWidth - 1] : false; - - // Construct the absolute value if we're negative. - ap_private<_AP_W, _AP_S> Tmp(isNeg ? -(*this) : (*this)); - - // Figure out how many bits we're using. - uint32_t n = Tmp.getActiveBits(); - - // The exponent (without bias normalization) is just the number of bits - // we are using. Note that the sign bit is gone since we constructed the - // absolute value. - uint64_t exp = n; - - // Return infinity for exponent overflow - if (exp > 1023) { - if (!isSigned || !isNeg) - return std::numeric_limits::infinity(); - else - return -std::numeric_limits::infinity(); - } - exp += 1023; // Increment for 1023 bias - - // Number of bits in mantissa is 52. To obtain the mantissa value, we must - // extract the high 52 bits from the correct words in pVal. - uint64_t mantissa; - unsigned hiWord = whichWord(n - 1); - if (hiWord == 0) { - mantissa = Tmp.get_pVal(0); - if (n > 52) - (mantissa) >>= (n - 52); // shift down, we want the top 52 bits. - } else { - assert(hiWord > 0 && "High word is negative?"); - uint64_t hibits = (Tmp.get_pVal(hiWord)) - << (52 - n % APINT_BITS_PER_WORD); - uint64_t lobits = - (Tmp.get_pVal(hiWord - 1)) >> (11 + n % APINT_BITS_PER_WORD); - mantissa = hibits | lobits; - } - - // The leading bit of mantissa is implicit, so get rid of it. - uint64_t sign = isNeg ? (1ULL << (APINT_BITS_PER_WORD - 1)) : 0; - union { - double __D; - uint64_t __I; - } __T; - __T.__I = sign | ((exp) << 52) | mantissa; - return __T.__D; - } - - /// @brief Converts this unsigned ap_private to a double value. 
- INLINE double roundToDouble() const { return roundToDouble(false); } - - /// @brief Converts this signed ap_private to a double value. - INLINE double signedRoundToDouble() const { return roundToDouble(true); } - - /// The conversion does not do a translation from integer to double, it just - /// re-interprets the bits as a double. Note that it is valid to do this on - /// any bit width. Exactly 64 bits will be translated. - /// @brief Converts ap_private bits to a double - INLINE double bitsToDouble() const { - union { - uint64_t __I; - double __D; - } __T; - __T.__I = pVal[0]; - return __T.__D; - } - - /// The conversion does not do a translation from integer to float, it just - /// re-interprets the bits as a float. Note that it is valid to do this on - /// any bit width. Exactly 32 bits will be translated. - /// @brief Converts ap_private bits to a double - INLINE float bitsToFloat() const { - union { - uint32_t __I; - float __F; - } __T; - __T.__I = uint32_t(pVal[0]); - return __T.__F; - } - - /// The conversion does not do a translation from double to integer, it just - /// re-interprets the bits of the double. Note that it is valid to do this on - /// any bit width but bits from V may get truncated. - /// @brief Converts a double to ap_private bits. - INLINE ap_private& doubleToBits(double __V) { - union { - uint64_t __I; - double __D; - } __T; - __T.__D = __V; - pVal[0] = __T.__I; - return *this; - } - - /// The conversion does not do a translation from float to integer, it just - /// re-interprets the bits of the float. Note that it is valid to do this on - /// any bit width but bits from V may get truncated. - /// @brief Converts a float to ap_private bits. 
- INLINE ap_private& floatToBits(float __V) { - union { - uint32_t __I; - float __F; - } __T; - __T.__F = __V; - pVal[0] = __T.__I; - } - - // Reduce operation - //----------------------------------------------------------- - INLINE bool and_reduce() const { return isMaxValue(); } - - INLINE bool nand_reduce() const { return isMinValue(); } - - INLINE bool or_reduce() const { return (bool)countPopulation(); } - - INLINE bool nor_reduce() const { return countPopulation() == 0; } - - INLINE bool xor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? true : false; - } - - INLINE bool xnor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? false : true; - } - INLINE std::string to_string(uint8_t radix = 16, bool sign = false) const { - return toString(radix, radix == 10 ? _AP_S : sign); - } -}; // End of class ap_private <_AP_W, _AP_S, false> - -namespace ap_private_ops { - -enum { APINT_BITS_PER_WORD = 64 }; -template -INLINE bool operator==(uint64_t V1, const ap_private<_AP_W, _AP_S>& V2) { - return V2 == V1; -} - -template -INLINE bool operator!=(uint64_t V1, const ap_private<_AP_W, _AP_S>& V2) { - return V2 != V1; -} - -template -INLINE bool get(const ap_private<_AP_W, _AP_S>& a) { - static const uint64_t mask = 1ULL << (index & 0x3f); - return ((mask & a.get_pVal((index) >> 6)) != 0); -} - -template -INLINE void set(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark1 = 0, - const ap_private& mark2 = 0) { - enum { - APINT_BITS_PER_WORD = 64, - lsb_word = lsb_index / APINT_BITS_PER_WORD, - msb_word = msb_index / APINT_BITS_PER_WORD, - msb = msb_index % APINT_BITS_PER_WORD, - lsb = lsb_index % APINT_BITS_PER_WORD - }; - if (msb_word == lsb_word) { - const uint64_t mask = ~0ULL >> - (lsb) << (APINT_BITS_PER_WORD - msb + lsb - 1) >> - (APINT_BITS_PER_WORD - msb - 1); - // a.set_pVal(msb_word, a.get_pVal(msb_word) | mask); - a.get_pVal(msb_word) |= mask; - } else { - const uint64_t lsb_mask = ~0ULL >> (lsb) << (lsb); - 
const uint64_t msb_mask = ~0ULL << (APINT_BITS_PER_WORD - msb - 1) >> - (APINT_BITS_PER_WORD - msb - 1); - // a.set_pVal(lsb_word, a.get_pVal(lsb_word) | lsb_mask); - a.get_pVal(lsb_word) |= lsb_mask; - for (int i = lsb_word + 1; i < msb_word; i++) { - a.set_pVal(i, ~0ULL); - // a.get_pVal(i)=0; - } - // a.set_pVal(msb_word, a.get_pVal(msb_word) | msb_mask); - - a.get_pVal(msb_word) |= msb_mask; - } - a.clearUnusedBits(); -} - -template -INLINE void clear(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark1 = 0, - const ap_private& mark2 = 0) { - enum { - APINT_BITS_PER_WORD = 64, - lsb_word = lsb_index / APINT_BITS_PER_WORD, - msb_word = msb_index / APINT_BITS_PER_WORD, - msb = msb_index % APINT_BITS_PER_WORD, - lsb = lsb_index % APINT_BITS_PER_WORD - }; - if (msb_word == lsb_word) { - const uint64_t mask = - ~(~0ULL >> (lsb) << (APINT_BITS_PER_WORD - msb + lsb - 1) >> - (APINT_BITS_PER_WORD - msb - 1)); - // a.set_pVal(msb_word, a.get_pVal(msb_word) & mask); - a.get_pVal(msb_word) &= mask; - } else { - const uint64_t lsb_mask = ~(~0ULL >> (lsb) << (lsb)); - const uint64_t msb_mask = ~(~0ULL << (APINT_BITS_PER_WORD - msb - 1) >> - (APINT_BITS_PER_WORD - msb - 1)); - // a.set_pVal(lsb_word, a.get_pVal(lsb_word) & lsb_mask); - a.get_pVal(lsb_word) &= lsb_mask; - for (int i = lsb_word + 1; i < msb_word; i++) { - // a.set_pVal(i, 0); - a.get_pVal(i) = 0; - } - // a.set_pVal(msb_word, a.get_pVal(msb_word) & msb_mask); - a.get_pVal(msb_word) &= msb_mask; - } - a.clearUnusedBits(); -} - -template -INLINE void set(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark = 0) { - enum { APINT_BITS_PER_WORD = 64, word = index / APINT_BITS_PER_WORD }; - static const uint64_t mask = 1ULL << (index % APINT_BITS_PER_WORD); - // a.set_pVal(word, a.get_pVal(word) | mask); - a.get_pVal(word) |= mask; - a.clearUnusedBits(); -} - -template -INLINE void clear(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark = 0) { - enum { APINT_BITS_PER_WORD = 64, word = index / 
APINT_BITS_PER_WORD }; - static const uint64_t mask = ~(1ULL << (index % APINT_BITS_PER_WORD)); - // a.set_pVal(word, a.get_pVal(word) & mask); - a.get_pVal(word) &= mask; - a.clearUnusedBits(); -} - -} // End of ap_private_ops namespace - -template -INLINE std::string ap_private<_AP_W, _AP_S, false>::toString( - uint8_t radix, bool wantSigned) const { - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - static const char* digits[] = {"0", "1", "2", "3", "4", "5", "6", "7", - "8", "9", "A", "B", "C", "D", "E", "F"}; - std::string result; - - if (radix != 10) { - // For the 2, 8 and 16 bit cases, we can just shift instead of divide - // because the number of bits per digit (1,3 and 4 respectively) divides - // equaly. We just shift until there value is zero. - - // First, check for a zero value and just short circuit the logic below. - if (*this == (uint64_t)(0)) - result = "0"; - else { - ap_private<_AP_W, false> tmp(*this); - size_t insert_at = 0; - bool leading_zero = true; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. - tmp.flip(); - tmp++; - tmp.clearUnusedBitsToZero(); - result = "-"; - insert_at = 1; - leading_zero = false; - } - switch (radix) { - case 2: - result += "0b"; - break; - case 8: - result += "0o"; - break; - case 16: - result += "0x"; - break; - default: - assert("invalid radix" && 0); - } - insert_at += 2; - // Just shift tmp right for each digit width until it becomes zero - uint32_t shift = (radix == 16 ? 4 : (radix == 8 ? 
3 : 1)); - uint64_t mask = radix - 1; - ap_private<_AP_W, false> zero(0); - unsigned bits = 0; - while (tmp.ne(zero)) { - uint64_t digit = tmp.get_VAL() & mask; - result.insert(insert_at, digits[digit]); - tmp = tmp.lshr(shift); - ++bits; - } - bits *= shift; - if (bits < _AP_W && leading_zero) result.insert(insert_at, digits[0]); - } - return result; - } - - ap_private<_AP_W, false> tmp(*this); - ap_private<_AP_W, false> divisor(radix); - ap_private<_AP_W, false> zero(0); - size_t insert_at = 0; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. - tmp.flip(); - tmp++; - tmp.clearUnusedBitsToZero(); - result = "-"; - insert_at = 1; - } - if (tmp == ap_private<_AP_W, false>(0)) - result = "0"; - else - while (tmp.ne(zero)) { - ap_private<_AP_W, false> APdigit(0); - ap_private<_AP_W, false> tmp2(0); - ap_private_ops::divide(tmp, tmp.getNumWords(), divisor, - divisor.getNumWords(), &tmp2, &APdigit); - uint64_t digit = APdigit.getZExtValue(); - assert(digit < radix && "divide failed"); - result.insert(insert_at, digits[digit]); - tmp = tmp2; - } - - return result; -} // End of ap_private<_AP_W, _AP_S, false>::toString() - -template -std::ostream &operator<<(std::ostream &os, const ap_private<_AP_W, _AP_S> &x) { - std::ios_base::fmtflags ff = std::cout.flags(); - if (ff & std::cout.hex) { - os << x.toString(16, false); // don't print sign - } else if (ff & std::cout.oct) { - os << x.toString(8, false); // don't print sign - } else { - os << x.toString(10, _AP_S); - } - return os; -} - -// ------------------------------------------------------------ // -// XXX moved here from ap_int_sim.h XXX // -// ------------------------------------------------------------ // - -/// Concatination reference. 
-/// Proxy class which allows concatination to be used as rvalue(for reading) and -/// lvalue(for writing) -// ---------------------------------------------------------------- -// template -// struct ap_concat_ref { -//#ifdef _MSC_VER -//#pragma warning(disable : 4521 4522) -//#endif -// enum { -// _AP_WR = _AP_W1 + _AP_W2, -// }; -// _AP_T1& mbv1; -// _AP_T2& mbv2; -// -// INLINE ap_concat_ref(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& -// ref) -// : mbv1(ref.mbv1), mbv2(ref.mbv2) {} -// -// INLINE ap_concat_ref(_AP_T1& bv1, _AP_T2& bv2) : mbv1(bv1), mbv2(bv2) {} -// -// template -// INLINE ap_concat_ref& operator=(const ap_private<_AP_W3, _AP_S3>& val) { -// ap_private<_AP_W1 + _AP_W2, false> vval(val); -// int W_ref1 = mbv1.length(); -// int W_ref2 = mbv2.length(); -// ap_private<_AP_W1, false> mask1(-1); -// mask1 >>= _AP_W1 - W_ref1; -// ap_private<_AP_W2, false> mask2(-1); -// mask2 >>= _AP_W2 - W_ref2; -// mbv1.set(ap_private<_AP_W1, false>((vval >> W_ref2) & mask1)); -// mbv2.set(ap_private<_AP_W2, false>(vval & mask2)); -// return *this; -// } -// -// INLINE ap_concat_ref& operator=(unsigned long long val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// INLINE ap_concat_ref& operator=( -// const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=(const _private_bit_ref<_AP_W3, _AP_S3>& -// val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=(const _private_range_ref<_AP_W3, _AP_S3>& -// val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return 
operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) -// { -// return operator=((const ap_private<_AP_W3, false>)(val)); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const ap_fixed_base<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& -// val) { -// return operator=(val.to_ap_private()); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { -// return operator=((unsigned long long)(bool)(val)); -// } -// -// INLINE operator ap_private<_AP_WR, false>() const { return get(); } -// -// INLINE operator unsigned long long() const { return get().to_uint64(); } -// -// template -// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// _private_range_ref<_AP_W3, _AP_S3> > -// operator,(const _private_range_ref<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// _private_range_ref<_AP_W3, _AP_S3> >( -// *this, const_cast<_private_range_ref<_AP_W3, _AP_S3>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_private<_AP_W3, _AP_S3> -// > -// operator,(ap_private<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// ap_private<_AP_W3, _AP_S3> >(*this, a2); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_private<_AP_W3, _AP_S3> -// > -// operator,(const ap_private<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// ap_private<_AP_W3, _AP_S3> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, 1, _private_bit_ref<_AP_W3, -// _AP_S3> > -// operator,(const _private_bit_ref<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, 1, _private_bit_ref<_AP_W3, -// _AP_S3> >( -// *this, const_cast<_private_bit_ref<_AP_W3, _AP_S3>&>(a2)); -// } -// 
-// template -// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, -// ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> > -// operator,(const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, -// ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_WR, ap_concat_ref, _AP_W3, -// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > -// operator,( -// const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> &a2) -// { -// return ap_concat_ref< -// _AP_WR, ap_concat_ref, _AP_W3, -// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( -// *this, -// const_cast< -// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, -// _AP_N3>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_WR, ap_concat_ref, 1, -// af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> -// > -// operator,(const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, -// _AP_N3> -// &a2) { -// return ap_concat_ref< -// _AP_WR, ap_concat_ref, 1, -// af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( -// *this, -// const_cast&>( -// a2)); -// } -// -// template -// INLINE ap_private operator&( -// const ap_private<_AP_W3, _AP_S3>& a2) { -// return get() & a2; -// } -// -// template -// INLINE ap_private operator|( -// const ap_private<_AP_W3, _AP_S3>& a2) { -// return get() | a2; -// } -// -// template -// INLINE ap_private operator^( -// const ap_private<_AP_W3, _AP_S3>& a2) { -// return ap_private(get() ^ a2); -// } -// -// INLINE const ap_private<_AP_WR, false> get() const { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal = -// ap_private<_AP_W1 + _AP_W2, false>(mbv1.get()); -// ap_private<_AP_W1 + _AP_W2, false> tmpVal2 = -// ap_private<_AP_W1 + _AP_W2, false>(mbv2.get()); -// int W_ref2 = mbv2.length(); -// tmpVal <<= W_ref2; -// tmpVal |= tmpVal2; -// return tmpVal; -// } -// -// 
INLINE const ap_private<_AP_WR, false> get() { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal = -// ap_private<_AP_W1 + _AP_W2, false>(mbv1.get()); -// ap_private<_AP_W1 + _AP_W2, false> tmpVal2 = -// ap_private<_AP_W1 + _AP_W2, false>(mbv2.get()); -// int W_ref2 = mbv2.length(); -// tmpVal <<= W_ref2; -// tmpVal |= tmpVal2; -// return tmpVal; -// } -// -// template -// INLINE void set(const ap_private<_AP_W3, false>& val) { -// ap_private<_AP_W1 + _AP_W2, false> vval(val); -// int W_ref1 = mbv1.length(); -// int W_ref2 = mbv2.length(); -// ap_private<_AP_W1, false> mask1(-1); -// mask1 >>= _AP_W1 - W_ref1; -// ap_private<_AP_W2, false> mask2(-1); -// mask2 >>= _AP_W2 - W_ref2; -// mbv1.set(ap_private<_AP_W1, false>((vval >> W_ref2) & mask1)); -// mbv2.set(ap_private<_AP_W2, false>(vval & mask2)); -// } -// -// INLINE int length() const { return mbv1.length() + mbv2.length(); } -// -// INLINE std::string to_string(uint8_t radix = 2) const { -// return get().to_string(radix); -// } -//}; // struct ap_concat_ref. - -/// Range(slice) reference -/// Proxy class, which allows part selection to be used as rvalue(for reading) -/// and lvalue(for writing) -//------------------------------------------------------------ -template -struct _private_range_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - ap_private<_AP_W, _AP_S>& d_bv; - int l_index; - int h_index; - - public: - /// copy ctor. - INLINE _private_range_ref(const _private_range_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} - - /// direct ctor. - INLINE _private_range_ref(ap_private<_AP_W, _AP_S>* bv, int h, int l) - : d_bv(*bv), l_index(l), h_index(h) { - _AP_WARNING(h < 0 || l < 0, - "Higher bound (%d) and lower bound (%d) cannot be " - "negative.", - h, l); - _AP_WARNING(h >= _AP_W || l >= _AP_W, - "Higher bound (%d) or lower bound (%d) out of range (%d).", h, l, - _AP_W); - } - - /// compound or assignment. 
- template - INLINE _private_range_ref<_AP_W, _AP_S>& operator|=( - const _private_range_ref<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), - "Bitsize mismach for ap_private<>.range() &= " - "ap_private<>.range()."); - this->d_bv |= ref.d_bv; - return *this; - } - - /// compound or assignment with root type. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator|=( - const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index + 1) != _AP_W2, - "Bitsize mismach for ap_private<>.range() |= _AP_ROOT_TYPE<>."); - this->d_bv |= ref.V; - return *this; - } - - /// compound and assignment. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator&=( - const _private_range_ref<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), - "Bitsize mismach for ap_private<>.range() &= " - "ap_private<>.range()."); - this->d_bv &= ref.d_bv; - return *this; - }; - - /// compound and assignment with root type. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator&=( - const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index + 1) != _AP_W2, - "Bitsize mismach for ap_private<>.range() &= _AP_ROOT_TYPE<>."); - this->d_bv &= ref.V; - return *this; - } - - /// compound xor assignment. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator^=( - const _private_range_ref<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), - "Bitsize mismach for ap_private<>.range() ^= " - "ap_private<>.range()."); - this->d_bv ^= ref.d_bv; - return *this; - }; - - /// compound xor assignment with root type. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator^=( - const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index + 1) != _AP_W2, - "Bitsize mismach for ap_private<>.range() ^= _AP_ROOT_TYPE<>."); - this->d_bv ^= ref.V; - return *this; - } - - /// @name convertors. 
- // @{ - INLINE operator ap_private<_AP_W, false>() const { - ap_private<_AP_W, false> val(0); - if (h_index >= l_index) { - if (_AP_W > 64) { - val = d_bv; - ap_private<_AP_W, false> mask(-1); - mask >>= _AP_W - (h_index - l_index + 1); - val >>= l_index; - val &= mask; - } else { - const static uint64_t mask = (~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0)); - val = (d_bv >> l_index) & (mask >> (_AP_W - (h_index - l_index + 1))); - } - } else { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - if ((d_bv)[j]) val.set(i); - } - return val; - } - - INLINE operator unsigned long long() const { return to_uint64(); } - // @} - - template - INLINE _private_range_ref& operator=(const ap_private<_AP_W2, _AP_S2>& val) { - ap_private<_AP_W, false> vval = ap_private<_AP_W, false>(val); - if (l_index > h_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - (vval)[i] ? d_bv.set(j) : d_bv.clear(j); - } else { - if (_AP_W > 64) { - ap_private<_AP_W, false> mask(-1); - if (l_index > 0) { - mask <<= l_index; - vval <<= l_index; - } - if (h_index < _AP_W - 1) { - ap_private<_AP_W, false> mask2(-1); - mask2 >>= _AP_W - h_index - 1; - mask &= mask2; - vval &= mask2; - } - mask.flip(); - d_bv &= mask; - d_bv |= vval; - } else { - unsigned shift = 64 - _AP_W; - uint64_t mask = ~0ULL >> (shift); - if (l_index > 0) { - vval = mask & vval << l_index; - mask = mask & mask << l_index; - } - if (h_index < _AP_W - 1) { - uint64_t mask2 = mask; - mask2 >>= (_AP_W - h_index - 1); - mask &= mask2; - vval &= mask2; - } - mask = ~mask; - d_bv &= mask; - d_bv |= vval; - } - } - return *this; - } // operator=(const ap_private<>&) - - INLINE _private_range_ref& operator=(unsigned long long val) { - const ap_private<_AP_W, _AP_S> vval = val; - return operator=(vval); - } - - template - INLINE _private_range_ref& operator=( - const _private_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=((unsigned long long)(bool)val); - } - - template - INLINE _private_range_ref& 
operator=( - const _private_range_ref<_AP_W2, _AP_S2>& val) { - const ap_private<_AP_W, false> tmpVal(val); - return operator=(tmpVal); - } - -// template -// INLINE _private_range_ref& operator=( -// const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { -// const ap_private<_AP_W, false> tmpVal(val); -// return operator=(tmpVal); -// } - - // TODO from ap_int_base, ap_bit_ref and ap_range_ref. - - template - INLINE _private_range_ref& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(val.to_ap_int_base().V); - } - - template - INLINE _private_range_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(val.operator ap_int_base<_AP_W2, false>().V); - } - - template - INLINE _private_range_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((unsigned long long)(bool)val); - } - -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >( -// *this, const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// INLINE -// ap_concat_ref<_AP_W, _private_range_ref, _AP_W, ap_private<_AP_W, _AP_S> > -// operator,(ap_private<_AP_W, _AP_S>& a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W, -// ap_private<_AP_W, _AP_S> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(const _private_bit_ref<_AP_W2, _AP_S2> 
&a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// *this, const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, _private_range_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,( -// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, _private_range_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// *this, -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, _private_range_ref, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) { -// return ap_concat_ref< -// _AP_W, _private_range_ref, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// *this, -// const_cast&>( -// a2)); -// } - - template - INLINE bool operator==(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs == rhs; - } - - template - INLINE bool operator!=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs != rhs; - } - - template - INLINE bool operator>(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return 
lhs > rhs; - } - - template - INLINE bool operator>=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs >= rhs; - } - - template - INLINE bool operator<(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs < rhs; - } - - template - INLINE bool operator<=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs <= rhs; - } - - template - INLINE void set(const ap_private<_AP_W2, false>& val) { - ap_private<_AP_W, _AP_S> vval = val; - if (l_index > h_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - (vval)[i] ? d_bv.set(j) : d_bv.clear(j); - } else { - if (_AP_W > 64) { - ap_private<_AP_W, _AP_S> mask(-1); - if (l_index > 0) { - ap_private<_AP_W, false> mask1(-1); - mask1 >>= _AP_W - l_index; - mask1.flip(); - mask = mask1; - // vval&=mask1; - vval <<= l_index; - } - if (h_index < _AP_W - 1) { - ap_private<_AP_W, false> mask2(-1); - mask2 <<= h_index + 1; - mask2.flip(); - mask &= mask2; - vval &= mask2; - } - mask.flip(); - d_bv &= mask; - d_bv |= vval; - } else { - uint64_t mask = ~0ULL >> (64 - _AP_W); - if (l_index > 0) { - uint64_t mask1 = mask; - mask1 = mask & (mask1 >> (_AP_W - l_index)); - vval = mask & (vval << l_index); - mask = ~mask1 & mask; - // vval&=mask1; - } - if (h_index < _AP_W - 1) { - uint64_t mask2 = ~0ULL >> (64 - _AP_W); - mask2 = mask & (mask2 << (h_index + 1)); - mask &= ~mask2; - vval &= ~mask2; - } - d_bv &= (~mask & (~0ULL >> (64 - _AP_W))); - d_bv |= vval; - } - } - } - - INLINE ap_private<_AP_W, false> get() const { - ap_private<_AP_W, false> val(0); - if (h_index < l_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - if ((d_bv)[j]) val.set(i); - } else { - val = d_bv; - val >>= l_index; - if (h_index < _AP_W - 
1) { - if (_AP_W <= 64) { - const static uint64_t mask = - (~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0)); - val &= (mask >> (_AP_W - (h_index - l_index + 1))); - } else { - ap_private<_AP_W, false> mask(-1); - mask >>= _AP_W - (h_index - l_index + 1); - val &= mask; - } - } - } - return val; - } - - INLINE ap_private<_AP_W, false> get() { - ap_private<_AP_W, false> val(0); - if (h_index < l_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - if ((d_bv)[j]) val.set(i); - } else { - val = d_bv; - val >>= l_index; - if (h_index < _AP_W - 1) { - if (_AP_W <= 64) { - static const uint64_t mask = ~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0); - return val &= ((mask) >> (_AP_W - (h_index - l_index + 1))); - } else { - ap_private<_AP_W, false> mask(-1); - mask >>= _AP_W - (h_index - l_index + 1); - val &= mask; - } - } - } - return val; - } - - INLINE int length() const { - return h_index >= l_index ? h_index - l_index + 1 : l_index - h_index + 1; - } - - INLINE int to_int() const { - ap_private<_AP_W, false> val = get(); - return val.to_int(); - } - - INLINE unsigned int to_uint() const { - ap_private<_AP_W, false> val = get(); - return val.to_uint(); - } - - INLINE long to_long() const { - ap_private<_AP_W, false> val = get(); - return val.to_long(); - } - - INLINE unsigned long to_ulong() const { - ap_private<_AP_W, false> val = get(); - return val.to_ulong(); - } - - INLINE ap_slong to_int64() const { - ap_private<_AP_W, false> val = get(); - return val.to_int64(); - } - - INLINE ap_ulong to_uint64() const { - ap_private<_AP_W, false> val = get(); - return val.to_uint64(); - } - - INLINE std::string to_string(uint8_t radix = 2) const { - return get().to_string(radix); - } - - INLINE bool and_reduce() { - bool ret = true; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? 
l_index : h_index; - for (unsigned i = low; i != high; ++i) ret &= d_bv[i]; - return ret; - } - - INLINE bool or_reduce() { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) ret |= d_bv[i]; - return ret; - } - - INLINE bool xor_reduce() { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) ret ^= d_bv[i]; - return ret; - } -}; // struct _private_range_ref. - -/// Bit reference -/// Proxy class, which allows bit selection to be used as rvalue(for reading) -/// and lvalue(for writing) -//-------------------------------------------------------------- -template -struct _private_bit_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - ap_private<_AP_W, _AP_S>& d_bv; - int d_index; - - public: - // copy ctor. - INLINE _private_bit_ref(const _private_bit_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), d_index(ref.d_index) {} - - // director ctor. 
- INLINE _private_bit_ref(ap_private<_AP_W, _AP_S>& bv, int index = 0) - : d_bv(bv), d_index(index) { - _AP_WARNING(d_index < 0, "Index of bit vector (%d) cannot be negative.\n", - d_index); - _AP_WARNING(d_index >= _AP_W, - "Index of bit vector (%d) out of range (%d).\n", d_index, _AP_W); - } - - INLINE operator bool() const { return d_bv.get_bit(d_index); } - - INLINE bool to_bool() const { return operator bool(); } - - template - INLINE _private_bit_ref& operator=(const T& val) { - if (!!val) - d_bv.set(d_index); - else - d_bv.clear(d_index); - return *this; - } - -// template -// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2, ap_private<_AP_W2, -// _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, _AP_W2, ap_private<_AP_W2, -// _AP_S2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), a2); -// } -// -// template -// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2, -// _private_range_ref<_AP_W2, -// _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, _AP_W2, -// _private_range_ref<_AP_W2, -// _AP_S2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref<_AP_W2, -// _AP_S2> > operator,( -// const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// INLINE ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref> -// operator,( -// const _private_bit_ref &a2) const { -// return ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref>( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast<_private_bit_ref&>(a2)); -// } -// -// template -// INLINE 
ap_concat_ref<1, _private_bit_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// 1, _private_bit_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2> -// &a2) const { -// return ap_concat_ref< -// 1, _private_bit_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<1, _private_bit_ref, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2> -// &a2) const { -// return ap_concat_ref<1, _private_bit_ref, 1, af_bit_ref<_AP_W2, -// _AP_I2, _AP_S2, -// _AP_Q2, _AP_O2, -// _AP_N2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast&>( -// a2)); -// } - - template - INLINE bool operator==(const _private_bit_ref<_AP_W2, _AP_S2>& op) const { - return get() == op.get(); - } - - template - INLINE bool operator!=(const _private_bit_ref<_AP_W2, _AP_S2>& op) const { - return get() != op.get(); - } - - INLINE bool get() const { return operator bool(); } - - // template - // INLINE void set(const ap_private<_AP_W3, false>& val) { - // operator=(val); - // } - - // INLINE bool operator~() const { - // bool bit = (d_bv)[d_index]; - // return bit ? 
false : true; - // } - - INLINE int length() const { return 1; } - - // INLINE std::string to_string() const { - // bool val = get(); - // return val ? "1" : "0"; - // } - -}; // struct _private_bit_ref. - -// char a[100]; -// char* ptr = a; -// ap_int<2> n = 3; -// char* ptr2 = ptr + n*2; -// avoid ambiguous errors -#define OP_BIN_MIX_PTR(BIN_OP) \ - template \ - INLINE PTR_TYPE* operator BIN_OP(PTR_TYPE* i_op, \ - const ap_private<_AP_W, _AP_S>& op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE PTR_TYPE* operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, \ - PTR_TYPE* i_op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return op2 BIN_OP i_op; \ - } - -OP_BIN_MIX_PTR(+) -OP_BIN_MIX_PTR(-) -#undef OP_BIN_MIX_PTR - -// float OP ap_int -// when ap_int's width > 64, then trunc ap_int to ap_int<64> -#define OP_BIN_MIX_FLOAT(BIN_OP, C_TYPE) \ - template \ - INLINE C_TYPE operator BIN_OP(C_TYPE i_op, \ - const ap_private<_AP_W, _AP_S>& op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE C_TYPE operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, \ - C_TYPE i_op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return op2 BIN_OP i_op; \ - } - -#define OPS_MIX_FLOAT(C_TYPE) \ - OP_BIN_MIX_FLOAT(*, C_TYPE) \ - OP_BIN_MIX_FLOAT(/, C_TYPE) \ - OP_BIN_MIX_FLOAT(+, C_TYPE) \ - OP_BIN_MIX_FLOAT(-, C_TYPE) - -OPS_MIX_FLOAT(float) -OPS_MIX_FLOAT(double) -#undef OP_BIN_MIX_FLOAT -#undef OPS_MIX_FLOAT - -/// Operators mixing Integers with AP_Int -// ---------------------------------------------------------------- - -// partially specialize template argument _AP_C in order that: -// for _AP_W > 64, we will explicitly convert operand with native data type -// into corresponding ap_private -// for _AP_W <= 64, we will implicitly convert operand with ap_private into -// (unsigned) long long -#define 
OP_BIN_MIX_INT(BIN_OP, C_TYPE, _AP_WI, _AP_SI, RTYPE) \ - template \ - INLINE \ - typename ap_private<_AP_WI, _AP_SI>::template RType<_AP_W, _AP_S>::RTYPE \ - operator BIN_OP(C_TYPE i_op, const ap_private<_AP_W, _AP_S>& op) { \ - return ap_private<_AP_WI, _AP_SI>(i_op).operator BIN_OP(op); \ - } \ - template \ - INLINE \ - typename ap_private<_AP_W, _AP_S>::template RType<_AP_WI, _AP_SI>::RTYPE \ - operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, C_TYPE i_op) { \ - return op.operator BIN_OP(ap_private<_AP_WI, _AP_SI>(i_op)); \ - } - -#define OP_REL_MIX_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(const ap_private<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return op.operator REL_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ - } \ - template \ - INLINE bool operator REL_OP(C_TYPE op2, \ - const ap_private<_AP_W, _AP_S, false>& op) { \ - return ap_private<_AP_W2, _AP_S2>(op2).operator REL_OP(op); \ - } - -#define OP_ASSIGN_MIX_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ - ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ - return op.operator ASSIGN_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ - } - -#define OP_BIN_SHIFT_INT(BIN_OP, C_TYPE, _AP_WI, _AP_SI, RTYPE) \ - template \ - C_TYPE operator BIN_OP(C_TYPE i_op, \ - const ap_private<_AP_W, _AP_S, false>& op) { \ - return i_op BIN_OP(op.get_VAL()); \ - } \ - template \ - INLINE \ - typename ap_private<_AP_W, _AP_S>::template RType<_AP_WI, _AP_SI>::RTYPE \ - operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, C_TYPE i_op) { \ - return op.operator BIN_OP(i_op); \ - } - -#define OP_ASSIGN_RSHIFT_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ - ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ - op = op.operator>>(op2); \ - return op; \ - } - -#define OP_ASSIGN_LSHIFT_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ 
- ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ - op = op.operator<<(op2); \ - return op; \ - } - -#define OPS_MIX_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_BIN_MIX_INT(*, C_TYPE, (_AP_W2), (_AP_S2), mult) \ - OP_BIN_MIX_INT(+, C_TYPE, (_AP_W2), (_AP_S2), plus) \ - OP_BIN_MIX_INT(-, C_TYPE, (_AP_W2), (_AP_S2), minus) \ - OP_BIN_MIX_INT(/, C_TYPE, (_AP_W2), (_AP_S2), div) \ - OP_BIN_MIX_INT(%, C_TYPE, (_AP_W2), (_AP_S2), mod) \ - OP_BIN_MIX_INT(&, C_TYPE, (_AP_W2), (_AP_S2), logic) \ - OP_BIN_MIX_INT(|, C_TYPE, (_AP_W2), (_AP_S2), logic) \ - OP_BIN_MIX_INT (^, C_TYPE, (_AP_W2), (_AP_S2), logic) \ - OP_BIN_SHIFT_INT(>>, C_TYPE, (_AP_W2), (_AP_S2), arg1) \ - OP_BIN_SHIFT_INT(<<, C_TYPE, (_AP_W2), (_AP_S2), arg1) \ - \ - OP_ASSIGN_MIX_INT(+=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(-=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(*=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(/=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(%=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(&=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(|=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_MIX_INT(^=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_RSHIFT_INT(>>=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_ASSIGN_LSHIFT_INT(<<=, C_TYPE, (_AP_W2), (_AP_S2)) \ - \ - OP_REL_MIX_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_REL_MIX_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_REL_MIX_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_REL_MIX_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_REL_MIX_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ - OP_REL_MIX_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) - -OPS_MIX_INT(bool, 1, false) -OPS_MIX_INT(char, 8, CHAR_IS_SIGNED) -OPS_MIX_INT(signed char, 8, true) -OPS_MIX_INT(unsigned char, 8, false) -OPS_MIX_INT(short, sizeof(short) * 8, true) -OPS_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false) -OPS_MIX_INT(int, sizeof(int) * 8, true) -OPS_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false) -OPS_MIX_INT(long, sizeof(long) * 8, true) -OPS_MIX_INT(unsigned long, 
sizeof(unsigned long) * 8, false) -OPS_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true) -OPS_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false) - -#undef OP_BIN_MIX_INT -#undef OP_BIN_SHIFT_INT -#undef OP_ASSIGN_MIX_INT -#undef OP_ASSIGN_RSHIFT_INT -#undef OP_ASSIGN_LSHIFT_INT -#undef OP_REL_MIX_INT -#undef OPS_MIX_INT - -#define OP_BIN_MIX_RANGE(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const _private_range_ref<_AP_W1, _AP_S1>& op1, \ - const ap_private<_AP_W2, _AP_S2>& op2) { \ - return ap_private<_AP_W1, false>(op1).operator BIN_OP(op2); \ - } \ - template \ - INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ - const _private_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator BIN_OP(ap_private<_AP_W2, false>(op2)); \ - } - -#define OP_ASSIGN_MIX_RANGE(ASSIGN_OP) \ - template \ - INLINE ap_private<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_private<_AP_W1, _AP_S1>& op1, \ - const _private_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator ASSIGN_OP(ap_private<_AP_W2, false>(op2)); \ - } \ - template \ - INLINE _private_range_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - _private_range_ref<_AP_W1, _AP_S1>& op1, \ - ap_private<_AP_W2, _AP_S2>& op2) { \ - ap_private<_AP_W1, false> tmp(op1); \ - tmp.operator ASSIGN_OP(op2); \ - op1 = tmp; \ - return op1; \ - } - -#define OP_REL_MIX_RANGE(REL_OP) \ - template \ - INLINE bool operator REL_OP(const _private_range_ref<_AP_W1, _AP_S1>& op1, \ - const ap_private<_AP_W2, _AP_S2>& op2) { \ - return ap_private<_AP_W1, false>(op1).operator REL_OP(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ - const _private_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator REL_OP(op2.operator ap_private<_AP_W2, false>()); \ - } - -OP_BIN_MIX_RANGE(+, plus) -OP_BIN_MIX_RANGE(-, minus) 
-OP_BIN_MIX_RANGE(*, mult) -OP_BIN_MIX_RANGE(/, div) -OP_BIN_MIX_RANGE(%, mod) -OP_BIN_MIX_RANGE(&, logic) -OP_BIN_MIX_RANGE(|, logic) -OP_BIN_MIX_RANGE(^, logic) -OP_BIN_MIX_RANGE(>>, arg1) -OP_BIN_MIX_RANGE(<<, arg1) -#undef OP_BIN_MIX_RANGE - -OP_ASSIGN_MIX_RANGE(+=) -OP_ASSIGN_MIX_RANGE(-=) -OP_ASSIGN_MIX_RANGE(*=) -OP_ASSIGN_MIX_RANGE(/=) -OP_ASSIGN_MIX_RANGE(%=) -OP_ASSIGN_MIX_RANGE(&=) -OP_ASSIGN_MIX_RANGE(|=) -OP_ASSIGN_MIX_RANGE(^=) -OP_ASSIGN_MIX_RANGE(>>=) -OP_ASSIGN_MIX_RANGE(<<=) -#undef OP_ASSIGN_MIX_RANGE - -OP_REL_MIX_RANGE(>) -OP_REL_MIX_RANGE(<) -OP_REL_MIX_RANGE(>=) -OP_REL_MIX_RANGE(<=) -OP_REL_MIX_RANGE(==) -OP_REL_MIX_RANGE(!=) -#undef OP_REL_MIX_RANGE - -#define OP_BIN_MIX_BIT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_private<1, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP(const _private_bit_ref<_AP_W1, _AP_S1>& op1, \ - const ap_private<_AP_W2, _AP_S2>& op2) { \ - return ap_private<1, false>(op1).operator BIN_OP(op2); \ - } \ - template \ - INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<1, false>::RTYPE \ - operator BIN_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ - const _private_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator BIN_OP(ap_private<1, false>(op2)); \ - } - -#define OP_ASSIGN_MIX_BIT(ASSIGN_OP) \ - template \ - INLINE ap_private<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_private<_AP_W1, _AP_S1>& op1, \ - _private_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator ASSIGN_OP(ap_private<1, false>(op2)); \ - } \ - template \ - INLINE _private_bit_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - _private_bit_ref<_AP_W1, _AP_S1>& op1, \ - ap_private<_AP_W2, _AP_S2>& op2) { \ - ap_private<1, false> tmp(op1); \ - tmp.operator ASSIGN_OP(op2); \ - op1 = tmp; \ - return op1; \ - } - -#define OP_REL_MIX_BIT(REL_OP) \ - template \ - INLINE bool operator REL_OP(const _private_bit_ref<_AP_W1, _AP_S1>& op1, \ - const ap_private<_AP_W2, _AP_S2>& op2) { \ - return ap_private<_AP_W1, 
false>(op1).operator REL_OP(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_private<_AP_W1, _AP_S1>& op1, \ - const _private_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator REL_OP(ap_private<1, false>(op2)); \ - } - -OP_ASSIGN_MIX_BIT(+=) -OP_ASSIGN_MIX_BIT(-=) -OP_ASSIGN_MIX_BIT(*=) -OP_ASSIGN_MIX_BIT(/=) -OP_ASSIGN_MIX_BIT(%=) -OP_ASSIGN_MIX_BIT(&=) -OP_ASSIGN_MIX_BIT(|=) -OP_ASSIGN_MIX_BIT(^=) -OP_ASSIGN_MIX_BIT(>>=) -OP_ASSIGN_MIX_BIT(<<=) -#undef OP_ASSIGN_MIX_BIT - -OP_BIN_MIX_BIT(+, plus) -OP_BIN_MIX_BIT(-, minus) -OP_BIN_MIX_BIT(*, mult) -OP_BIN_MIX_BIT(/, div) -OP_BIN_MIX_BIT(%, mod) -OP_BIN_MIX_BIT(&, logic) -OP_BIN_MIX_BIT(|, logic) -OP_BIN_MIX_BIT(^, logic) -OP_BIN_MIX_BIT(>>, arg1) -OP_BIN_MIX_BIT(<<, arg1) -#undef OP_BIN_MIX_BIT - -OP_REL_MIX_BIT(>) -OP_REL_MIX_BIT(<) -OP_REL_MIX_BIT(<=) -OP_REL_MIX_BIT(>=) -OP_REL_MIX_BIT(==) -OP_REL_MIX_BIT(!=) -#undef OP_REL_MIX_BIT - -#define REF_REL_OP_MIX_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(const _private_range_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return (ap_private<_AP_W, false>(op)) \ - . 
\ - operator REL_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ - } \ - template \ - INLINE bool operator REL_OP(C_TYPE op2, \ - const _private_range_ref<_AP_W, _AP_S>& op) { \ - return ap_private<_AP_W2, _AP_S2>(op2).operator REL_OP( \ - ap_private<_AP_W, false>(op)); \ - } \ - template \ - INLINE bool operator REL_OP(const _private_bit_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return (bool(op))REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP(C_TYPE op2, \ - const _private_bit_ref<_AP_W, _AP_S>& op) { \ - return op2 REL_OP(bool(op)); \ - } - -#define REF_REL_MIX_INT(C_TYPE, _AP_W2, _AP_S2) \ - REF_REL_OP_MIX_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_REL_OP_MIX_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_REL_OP_MIX_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_REL_OP_MIX_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_REL_OP_MIX_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_REL_OP_MIX_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) - -REF_REL_MIX_INT(bool, 1, false) -REF_REL_MIX_INT(char, 8, CHAR_IS_SIGNED) -REF_REL_MIX_INT(signed char, 8, true) -REF_REL_MIX_INT(unsigned char, 8, false) -REF_REL_MIX_INT(short, sizeof(short) * 8, true) -REF_REL_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false) -REF_REL_MIX_INT(int, sizeof(int) * 8, true) -REF_REL_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false) -REF_REL_MIX_INT(long, sizeof(long) * 8, true) -REF_REL_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false) -REF_REL_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true) -REF_REL_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false) -#undef REF_REL_OP_MIX_INT -#undef REF_REL_MIX_INT - -#define REF_BIN_OP_MIX_INT(BIN_OP, RTYPE, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE \ - typename ap_private<_AP_W, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP(const _private_range_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return (ap_private<_AP_W, false>(op)) \ - . 
\ - operator BIN_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ - } \ - template \ - INLINE \ - typename ap_private<_AP_W2, _AP_S2>::template RType<_AP_W, false>::RTYPE \ - operator BIN_OP(C_TYPE op2, \ - const _private_range_ref<_AP_W, _AP_S>& op) { \ - return ap_private<_AP_W2, _AP_S2>(op2).operator BIN_OP( \ - ap_private<_AP_W, false>(op)); \ - } - -#define REF_BIN_MIX_INT(C_TYPE, _AP_W2, _AP_S2) \ - REF_BIN_OP_MIX_INT(+, plus, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(-, minus, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(*, mult, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(/, div, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(%, mod, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(&, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(|, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(^, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(>>, arg1, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_MIX_INT(<<, arg1, C_TYPE, (_AP_W2), (_AP_S2)) - -REF_BIN_MIX_INT(bool, 1, false) -REF_BIN_MIX_INT(char, 8, CHAR_IS_SIGNED) -REF_BIN_MIX_INT(signed char, 8, true) -REF_BIN_MIX_INT(unsigned char, 8, false) -REF_BIN_MIX_INT(short, sizeof(short) * 8, true) -REF_BIN_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false) -REF_BIN_MIX_INT(int, sizeof(int) * 8, true) -REF_BIN_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false) -REF_BIN_MIX_INT(long, sizeof(long) * 8, true) -REF_BIN_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false) -REF_BIN_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true) -REF_BIN_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false) -#undef REF_BIN_OP_MIX_INT -#undef REF_BIN_MIX_INT - -#define REF_BIN_OP(BIN_OP, RTYPE) \ - template \ - INLINE \ - typename ap_private<_AP_W, false>::template RType<_AP_W2, false>::RTYPE \ - operator BIN_OP(const _private_range_ref<_AP_W, _AP_S>& lhs, \ - const _private_range_ref<_AP_W2, _AP_S2>& rhs) { \ - return ap_private<_AP_W, false>(lhs).operator BIN_OP( \ - ap_private<_AP_W2, 
false>(rhs)); \ - } - -REF_BIN_OP(+, plus) -REF_BIN_OP(-, minus) -REF_BIN_OP(*, mult) -REF_BIN_OP(/, div) -REF_BIN_OP(%, mod) -REF_BIN_OP(&, logic) -REF_BIN_OP(|, logic) -REF_BIN_OP(^, logic) -REF_BIN_OP(>>, arg1) -REF_BIN_OP(<<, arg1) -#undef REF_BIN_OP - -//************************************************************************ -// Implement -// ap_private = ap_concat_ref OP ap_concat_ref -// for operators +, -, *, /, %, >>, <<, &, |, ^ -// Without these operators the operands are converted to int64 and -// larger results lose informations (higher order bits). -// -// operand OP -// / | -// left-concat right-concat -// / | / | -// -// -// _AP_LW1, _AP_LT1 (width and type of left-concat's left side) -// _AP_LW2, _AP_LT2 (width and type of left-concat's right side) -// Similarly for RHS of operand OP: _AP_RW1, AP_RW2, _AP_RT1, _AP_RT2 -// -// In Verilog 2001 result of concatenation is always unsigned even -// when both sides are signed. -//************************************************************************ - -#endif // ifndef __AP_PRIVATE_H__ - -// -*- cpp -*- diff --git a/TrigScint/include/TrigScint/objdef.h b/TrigScint/include/TrigScint/objdef.h index 230cf1125..b9706db9a 100755 --- a/TrigScint/include/TrigScint/objdef.h +++ b/TrigScint/include/TrigScint/objdef.h @@ -1,7 +1,7 @@ #ifndef OBJDEF_H #define OBJDEF_H -#include "../../../Trigger/HLS_arbitrary_Precision_Types/include/ap_int.h" +#include "ap_int.h" #define NTIMES 6 #define NHITS 25 #define NCLUS 25 diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 63229c0dd..ec427d346 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -35,15 +35,25 @@ void TrigScintFirmwareTracker::configure(framework::config::Parameters &ps) { } void TrigScintFirmwareTracker::produce(framework::Event &event) { + //This processor takes in TS digis and outputs a track collection. 
It does so using clusterproducer_sw + //and trackproducer_hw, which are validated pieces of HLS code (though clusterproducer_sw has had its + //instances of pragmas excluded. I will comment on how clusterproducer and trackproducer work more + //thouroughly in them respectively, but generally the clusterproducer makes only two hit clusters (as + //ready that was all that was made from the original sw) and does so by making a digi map and running + //along channels numerically and pairing if possible. The trackproducer takes a LOOKUP array as a LUT + //and does track pattern mathcing. This depends on alignment through the A vector below. if (verbose_) { ldmx_log(debug) << "TrigScintFirmwareTracker: produce() starts! Event number: " << event.getEventHeader().getEventNumber(); } - //I AM FILLING IN THE TRACKING LUT FOR LATER USE ap_int<12> A[3]={0,0,0}; ap_int<12> LOOKUP[NCENT][COMBO][2]; + + //This line fills in the LOOKUP table used for patter matching latter. The array takes in as its first + //argument the centroid of a first pad cluster, then the next two take on which track pattern (of ~9) + //we are matching to and the last if we are matching to a cluster with two hits for(int i = 0; i(digis1_collection_, passName_)}; const auto digis3_{ event.getCollection(digis2_collection_, passName_)}; const auto digis2_{ event.getCollection(digis3_collection_, passName_)}; - + + if (verbose_) { + ldmx_log(debug) << "Got digi collection " << digis1_collection_ << "_" + << passName_ << " with " << digis1_.size() << " entries "; + } + + //The next collection of things fill in the firmware hit objects from reading in the + //digi collections the necessary information. The firmware hit objects only keep + //bID,mID,Time, and PE count. 
int occupied[NCHAN]; for(int i = 0; i0){ @@ -230,10 +248,15 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } event.add(output_collection_, tracks_); tracks_.resize(0); + /*delete Point1; + delete Point2; + delete Point3;*/ return; } ldmx::TrigScintTrack TrigScintFirmwareTracker::makeTrack(Track outTrk) { + //This takes a firmware track object and reverts it into an ldmx track object, unfortunately only + //retaining that information of the track that is retained in the firmware track. ldmx::TrigScintTrack tr; float pe = outTrk.Pad1.Seed.Amp+outTrk.Pad1.Sec.Amp; pe += outTrk.Pad2.Seed.Amp+outTrk.Pad2.Sec.Amp; diff --git a/TrigScint/src/TrigScint/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/clusterproducer_sw.cxx index e54f70897..e94011fc1 100755 --- a/TrigScint/src/TrigScint/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/clusterproducer_sw.cxx @@ -12,6 +12,10 @@ Cluster* clusterproducer_sw(Hit inHit[NHITS]){ Cluster* outClus = new Cluster[NCLUS]; + for(int i = 0;i2*NCHAN){continue;} for(int I = 0;I=0)){ if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} @@ -54,9 +65,7 @@ void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS] if(outTrk[i-1].resid<=outTrk[i+1].resid){clearTrack(outTrk[i+1]);}else{clearTrack(outTrk[i-1]);} } } - //GETS RID OF SHARED CLUSTERS IN THE THIRD PAD for(int i = 1;i=0)){ if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} } From 551b2fb4d84af66b1001ea2e7fd75adafebfcf34 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Tue, 17 Sep 2024 09:11:25 -0700 Subject: [PATCH 11/19] Committed smart pointer (for real this time). 
--- TrigScint/include/TrigScint/clusterproducer.h | 2 +- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 10 ++++------ TrigScint/src/TrigScint/clusterproducer_sw.cxx | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/TrigScint/include/TrigScint/clusterproducer.h b/TrigScint/include/TrigScint/clusterproducer.h index 75333d8a1..7b1831664 100755 --- a/TrigScint/include/TrigScint/clusterproducer.h +++ b/TrigScint/include/TrigScint/clusterproducer.h @@ -6,7 +6,7 @@ void copyHit1(Hit One, Hit Two); void copyHit2(Hit One, Hit Two); void clusterproducer_ref(Hit inHit[NHITS],Cluster outClus[NCLUS]); -Cluster* clusterproducer_sw(Hit inHit[NHITS]); +std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]); void clusterproducer_hw(Hit inHit[NHITS],Cluster outClus[NCLUS]); #endif diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index ec427d346..11e4aa8a0 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -201,7 +201,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { //is messy, I had to do some post-call cleanup before looping over the clusters and putting them into Point i //which is feed into track producer int counterN=0; - Cluster* Point1=clusterproducer_sw(HPad1); + std::unique_ptr Point1=clusterproducer_sw(HPad1); int topSeed=0; for(int i = 0; i30)and(Point1[i].Seed.bID<(NCHAN+1))and(Point1[i].Seed.bID>=0)and(Point1[i].Sec.Amp<450)and(counterN Point2=clusterproducer_sw(HPad2); topSeed=0; for(int i = 0; i30)and(Point2[i].Seed.bID<(NCHAN+1))and(Point2[i].Seed.bID>=0)and(Point2[i].Sec.Amp<450)){ @@ -224,7 +224,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - Cluster* Point3=clusterproducer_sw(HPad3); + std::unique_ptr Point3=clusterproducer_sw(HPad3); topSeed=0; for(int i = 0; 
i30)and(Point3[i].Seed.bID<(NCHAN+1))and(Point3[i].Seed.bID>=0)and(Point3[i].Sec.Amp<450)){ @@ -248,9 +248,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } event.add(output_collection_, tracks_); tracks_.resize(0); - /*delete Point1; - delete Point2; - delete Point3;*/ + return; } diff --git a/TrigScint/src/TrigScint/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/clusterproducer_sw.cxx index e94011fc1..9d7093269 100755 --- a/TrigScint/src/TrigScint/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/clusterproducer_sw.cxx @@ -3,14 +3,14 @@ #include "TrigScint/objdef.h" #include "TrigScint/clusterproducer.h" -Cluster* clusterproducer_sw(Hit inHit[NHITS]){ +std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]){ ap_int<12> SEEDTHR = 30; ap_int<12> CLUSTHR = 30; ap_int<12> mapL1[NCHAN]; - Cluster* outClus = new Cluster[NCLUS]; + std::unique_ptr outClus(new Cluster[NCLUS]); for(int i = 0;i Date: Tue, 17 Sep 2024 18:53:40 -0700 Subject: [PATCH 12/19] Cleaning firmware objects to align with capitalization scheme --- .../include/TrigScint/{ => Firmware}/clusterproducer.h | 0 TrigScint/include/TrigScint/{ => Firmware}/objdef.h | 0 TrigScint/include/TrigScint/{ => Firmware}/testutils.h | 0 TrigScint/include/TrigScint/{ => Firmware}/trackproducer.h | 0 TrigScint/include/TrigScint/TrigScintFirmwareTracker.h | 2 +- .../src/TrigScint/{ => Firmware}/clusterproducer_sw.cxx | 4 ++-- TrigScint/src/TrigScint/{ => Firmware}/trackproducer_hw.cxx | 4 ++-- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 6 +++--- 8 files changed, 8 insertions(+), 8 deletions(-) rename TrigScint/include/TrigScint/{ => Firmware}/clusterproducer.h (100%) rename TrigScint/include/TrigScint/{ => Firmware}/objdef.h (100%) rename TrigScint/include/TrigScint/{ => Firmware}/testutils.h (100%) rename TrigScint/include/TrigScint/{ => Firmware}/trackproducer.h (100%) rename TrigScint/src/TrigScint/{ => Firmware}/clusterproducer_sw.cxx (95%) rename TrigScint/src/TrigScint/{ => 
Firmware}/trackproducer_hw.cxx (98%) diff --git a/TrigScint/include/TrigScint/clusterproducer.h b/TrigScint/include/TrigScint/Firmware/clusterproducer.h similarity index 100% rename from TrigScint/include/TrigScint/clusterproducer.h rename to TrigScint/include/TrigScint/Firmware/clusterproducer.h diff --git a/TrigScint/include/TrigScint/objdef.h b/TrigScint/include/TrigScint/Firmware/objdef.h similarity index 100% rename from TrigScint/include/TrigScint/objdef.h rename to TrigScint/include/TrigScint/Firmware/objdef.h diff --git a/TrigScint/include/TrigScint/testutils.h b/TrigScint/include/TrigScint/Firmware/testutils.h similarity index 100% rename from TrigScint/include/TrigScint/testutils.h rename to TrigScint/include/TrigScint/Firmware/testutils.h diff --git a/TrigScint/include/TrigScint/trackproducer.h b/TrigScint/include/TrigScint/Firmware/trackproducer.h similarity index 100% rename from TrigScint/include/TrigScint/trackproducer.h rename to TrigScint/include/TrigScint/Firmware/trackproducer.h diff --git a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h index 70381f50a..0ff791f86 100644 --- a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h +++ b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h @@ -15,7 +15,7 @@ #include "TrigScint/TrigScintFirmwareTracker.h" #include "TrigScint/Event/TrigScintHit.h" #include "TrigScint/Event/TrigScintTrack.h" -#include "TrigScint/objdef.h" +#include "TrigScint/Firmware/objdef.h" namespace trigscint { diff --git a/TrigScint/src/TrigScint/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx similarity index 95% rename from TrigScint/src/TrigScint/clusterproducer_sw.cxx rename to TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx index 9d7093269..4391cb65a 100755 --- a/TrigScint/src/TrigScint/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx @@ -1,7 +1,7 @@ #include #include -#include 
"TrigScint/objdef.h" -#include "TrigScint/clusterproducer.h" +#include "TrigScint/Firmware/objdef.h" +#include "TrigScint/Firmware/clusterproducer.h" std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]){ diff --git a/TrigScint/src/TrigScint/trackproducer_hw.cxx b/TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx similarity index 98% rename from TrigScint/src/TrigScint/trackproducer_hw.cxx rename to TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx index e20627b9f..57b6a866a 100755 --- a/TrigScint/src/TrigScint/trackproducer_hw.cxx +++ b/TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx @@ -1,7 +1,7 @@ #include #include -#include "TrigScint/objdef.h" -#include "TrigScint/trackproducer.h" +#include "TrigScint/Firmware/objdef.h" +#include "TrigScint/Firmware/trackproducer.h" void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS],Track outTrk[NTRK],ap_int<12> lookup[NCENT][COMBO][2]){ diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 11e4aa8a0..8d4f1e345 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -1,8 +1,8 @@ #include "TrigScint/TrigScintFirmwareTracker.h" -#include "TrigScint/trackproducer.h" -#include "TrigScint/clusterproducer.h" -#include "TrigScint/objdef.h" +#include "TrigScint/Firmware/trackproducer.h" +#include "TrigScint/Firmware/clusterproducer.h" +#include "TrigScint/Firmware/objdef.h" #include #include From 8c29a15bd5b2e4439a677f081f1de2d72efb769d Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Tue, 17 Sep 2024 21:53:05 -0700 Subject: [PATCH 13/19] A few more things --- TrigScint/include/TrigScint/TrigScintFirmwareTracker.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h index 0ff791f86..ec383a290 100644 --- 
a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h +++ b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h @@ -1,7 +1,7 @@ /** * @file TrigScintFirmwareTracker.h - * @brief Clustering of trigger scintillator hits - * @author Lene Kristian Bryngemark, Stanford University + * @brief Tracker made to emulate and stage real firmware, emulates existing ldmx software but has LUT structure. + * @author Rory O'Dwyer, Stanford University */ #ifndef TRIGSCINT_TRIGSCINTFIRMWARETRACKER_H @@ -118,4 +118,4 @@ class TrigScintFirmwareTracker : public framework::Producer { } // namespace trigscint -#endif /* TRIGSCINT_TRIGSCINTCLUSTERPRODUCER_H */ +#endif /* TRIGSCINT_TRIGSCINTFIRMWARETRACKER_H */ From c026ec6c87830a94b9103e948cdcf9b197bd3a26 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Tue, 17 Sep 2024 22:01:11 -0700 Subject: [PATCH 14/19] Trying to do clang fixes --- .../TrigScint/Firmware/clusterproducer.h | 6 +- TrigScint/include/TrigScint/Firmware/objdef.h | 142 +++++--- .../include/TrigScint/Firmware/testutils.h | 19 +- .../TrigScint/Firmware/trackproducer.h | 9 +- .../TrigScint/TrigScintFirmwareTracker.h | 10 +- .../TrigScint/Firmware/clusterproducer_sw.cxx | 123 ++++--- .../TrigScint/Firmware/trackproducer_hw.cxx | 225 ++++++++---- .../TrigScint/TrigScintFirmwareTracker.cxx | 323 ++++++++++-------- 8 files changed, 511 insertions(+), 346 deletions(-) diff --git a/TrigScint/include/TrigScint/Firmware/clusterproducer.h b/TrigScint/include/TrigScint/Firmware/clusterproducer.h index 7b1831664..39e5d3925 100755 --- a/TrigScint/include/TrigScint/Firmware/clusterproducer.h +++ b/TrigScint/include/TrigScint/Firmware/clusterproducer.h @@ -5,8 +5,8 @@ void copyHit1(Hit One, Hit Two); void copyHit2(Hit One, Hit Two); -void clusterproducer_ref(Hit inHit[NHITS],Cluster outClus[NCLUS]); -std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]); -void clusterproducer_hw(Hit inHit[NHITS],Cluster outClus[NCLUS]); +void clusterproducer_ref(Hit inHit[NHITS], Cluster outClus[NCLUS]); 
+std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]); +void clusterproducer_hw(Hit inHit[NHITS], Cluster outClus[NCLUS]); #endif diff --git a/TrigScint/include/TrigScint/Firmware/objdef.h b/TrigScint/include/TrigScint/Firmware/objdef.h index b9706db9a..39c80cda4 100755 --- a/TrigScint/include/TrigScint/Firmware/objdef.h +++ b/TrigScint/include/TrigScint/Firmware/objdef.h @@ -14,83 +14,121 @@ #define NDIGIS 14 #define COMBO 9 -//2*NCHAN*NTIMES are the number of bytes per event plus 4+4+4+3+1 bytes for the header +// 2*NCHAN*NTIMES are the number of bytes per event plus 4+4+4+3+1 bytes for the +// header #define NSAMPLES 6 -//NSAMPLES/8 is the number of 64 bit words +// NSAMPLES/8 is the number of 64 bit words #define NWORDS 72 struct Digi { - int mID, bID; - int adc0, adc1, adc2, adc3, adc4, adc5; - int tdc0, tdc1, tdc2, tdc3, tdc4, tdc5; + int mID, bID; + int adc0, adc1, adc2, adc3, adc4, adc5; + int tdc0, tdc1, tdc2, tdc3, tdc4, tdc5; }; -inline void clearDigi(Digi & c){ - c.mID=0;c.bID=0; - c.adc0=0;c.adc1=0;c.adc2=0;c.adc3=0;c.adc4=0;c.adc5=0; - c.tdc0=0;c.tdc1=0;c.tdc2=0;c.tdc3=0;c.tdc4=0;c.tdc5=0; +inline void clearDigi(Digi& c) { + c.mID = 0; + c.bID = 0; + c.adc0 = 0; + c.adc1 = 0; + c.adc2 = 0; + c.adc3 = 0; + c.adc4 = 0; + c.adc5 = 0; + c.tdc0 = 0; + c.tdc1 = 0; + c.tdc2 = 0; + c.tdc3 = 0; + c.tdc4 = 0; + c.tdc5 = 0; } struct Hit { - ap_int<12> mID, bID; - ap_int<12> Amp, Time; //TrigTime; + ap_int<12> mID, bID; + ap_int<12> Amp, Time; // TrigTime; }; -inline void clearHit(Hit & c){ - c.mID=0; c.bID=-1; c.Amp=0; c.Time=0; //c.TrigTime=0.0; +inline void clearHit(Hit& c) { + c.mID = 0; + c.bID = -1; + c.Amp = 0; + c.Time = 0; // c.TrigTime=0.0; } -inline void cpyHit(Hit & c1, Hit & c2){ - c1.mID=c2.mID;c1.bID=c2.bID;c1.Amp=c2.Amp;c1.Time=c2.Time; +inline void cpyHit(Hit& c1, Hit& c2) { + c1.mID = c2.mID; + c1.bID = c2.bID; + c1.Amp = c2.Amp; + c1.Time = c2.Time; } struct Cluster { - Hit Seed; Hit Sec; - ap_int<11> Cent; - //int nhits, mID, SeedID; - //float 
CentX, CentY, CentZ, Amp, Time, TrigTime; + Hit Seed; + Hit Sec; + ap_int<11> Cent; + // int nhits, mID, SeedID; + // float CentX, CentY, CentZ, Amp, Time, TrigTime; }; -inline void clearClus(Cluster & c){ - clearHit(c.Seed);clearHit(c.Sec);c.Cent = (ap_int<11>)(0);//clearHit(c.For); +inline void clearClus(Cluster& c) { + clearHit(c.Seed); + clearHit(c.Sec); + c.Cent = (ap_int<11>)(0); // clearHit(c.For); } -inline void calcCent(Cluster & c){ - if(c.Seed.Amp>0){ - c.Cent = (ap_int<12>)(10.*((float)(c.Seed.Amp*c.Seed.bID+c.Sec.Amp*c.Sec.bID))/((float)(c.Seed.Amp+c.Sec.Amp))); - }else{ - c.Cent=(ap_int<12>)(0); - } +inline void calcCent(Cluster& c) { + if (c.Seed.Amp > 0) { + c.Cent = (ap_int<12>)(10. * + ((float)(c.Seed.Amp * c.Seed.bID + + c.Sec.Amp * c.Sec.bID)) / + ((float)(c.Seed.Amp + c.Sec.Amp))); + } else { + c.Cent = (ap_int<12>)(0); + } } -inline void cpyCluster(Cluster & c1, Cluster & c2){ - cpyHit(c1.Seed,c2.Seed);cpyHit(c1.Sec,c2.Sec); +inline void cpyCluster(Cluster& c1, Cluster& c2) { + cpyHit(c1.Seed, c2.Seed); + cpyHit(c1.Sec, c2.Sec); } struct Track { - Cluster Pad1; Cluster Pad2; Cluster Pad3; - ap_int<12> resid; + Cluster Pad1; + Cluster Pad2; + Cluster Pad3; + ap_int<12> resid; }; -inline void clearTrack(Track & c){ - clearClus(c.Pad1);clearClus(c.Pad2);clearClus(c.Pad3); - c.resid=5000; +inline void clearTrack(Track& c) { + clearClus(c.Pad1); + clearClus(c.Pad2); + clearClus(c.Pad3); + c.resid = 5000; } -inline ap_int<12> calcTCent(Track & c){ - calcCent(c.Pad1);calcCent(c.Pad2);calcCent(c.Pad3); - float one = (float)c.Pad1.Cent; - float two = (float)c.Pad2.Cent; - float three = (float)c.Pad3.Cent; - float mean = (one+two+three)/3.0; - ap_int<12> Cent = (ap_int<10>)((int)(mean)); - return Cent; +inline ap_int<12> calcTCent(Track& c) { + calcCent(c.Pad1); + calcCent(c.Pad2); + calcCent(c.Pad3); + float one = (float)c.Pad1.Cent; + float two = (float)c.Pad2.Cent; + float three = (float)c.Pad3.Cent; + float mean = (one + two + three) / 3.0; + 
ap_int<12> Cent = (ap_int<10>)((int)(mean)); + return Cent; } -inline void calcResid(Track & c){ - calcCent(c.Pad1);calcCent(c.Pad2);calcCent(c.Pad3); - float one = (float)c.Pad1.Cent; - float two = (float)c.Pad2.Cent; - float three = (float)c.Pad3.Cent; - float mean = (one+two+three)/3.0; - c.resid = (ap_int<12>)((int)(((one-mean)*(one-mean)+(two-mean)*(two-mean)+(three-mean)*(three-mean))/3.0)); +inline void calcResid(Track& c) { + calcCent(c.Pad1); + calcCent(c.Pad2); + calcCent(c.Pad3); + float one = (float)c.Pad1.Cent; + float two = (float)c.Pad2.Cent; + float three = (float)c.Pad3.Cent; + float mean = (one + two + three) / 3.0; + c.resid = (ap_int<12>)((int)(((one - mean) * (one - mean) + + (two - mean) * (two - mean) + + (three - mean) * (three - mean)) / + 3.0)); } -inline void cpyTrack(Track & c1, Track & c2){ - cpyCluster(c1.Pad1,c2.Pad1);cpyCluster(c1.Pad2,c2.Pad2);cpyCluster(c1.Pad3,c2.Pad3); - c1.resid=c2.resid; +inline void cpyTrack(Track& c1, Track& c2) { + cpyCluster(c1.Pad1, c2.Pad1); + cpyCluster(c1.Pad2, c2.Pad2); + cpyCluster(c1.Pad3, c2.Pad3); + c1.resid = c2.resid; } #endif diff --git a/TrigScint/include/TrigScint/Firmware/testutils.h b/TrigScint/include/TrigScint/Firmware/testutils.h index e64a282fc..b6b1e2240 100755 --- a/TrigScint/include/TrigScint/Firmware/testutils.h +++ b/TrigScint/include/TrigScint/Firmware/testutils.h @@ -2,15 +2,20 @@ #define TESTUTILS_H #include "objdef.h" -bool compareHit(Hit Hit1, Hit Hit2){ - return ((Hit1.mID==Hit2.mID)and(Hit1.bID==Hit2.bID)and(Hit1.Amp==Hit2.Amp)and(Hit1.Time==Hit2.Time));//and(Hit1.TrigTime==Hit2.TrigTime)); +bool compareHit(Hit Hit1, Hit Hit2) { + return ((Hit1.mID == Hit2.mID) and (Hit1.bID == Hit2.bID) and + (Hit1.Amp == Hit2.Amp) and + (Hit1.Time == Hit2.Time)); // and(Hit1.TrigTime==Hit2.TrigTime)); } -bool compareClus(Cluster clus1[NHITS], Cluster clus2[NHITS]){ - for(int i = 0; i lookup[NCENT][COMBO][2]); -void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster 
Pad3[NCLUS],Track outTrk[NTRK], ap_int<12> lookup[NCENT][COMBO][2]); +void trackproducer_ref(Cluster Pad1[NTRK], Cluster Pad2[NCLUS], + Cluster Pad3[NCLUS], Track outTrk[NTRK], + ap_int<12> lookup[NCENT][COMBO][2]); +void trackproducer_hw(Cluster Pad1[NTRK], Cluster Pad2[NCLUS], + Cluster Pad3[NCLUS], Track outTrk[NTRK], + ap_int<12> lookup[NCENT][COMBO][2]); #endif diff --git a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h index ec383a290..974ccc8ad 100644 --- a/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h +++ b/TrigScint/include/TrigScint/TrigScintFirmwareTracker.h @@ -1,6 +1,7 @@ /** * @file TrigScintFirmwareTracker.h - * @brief Tracker made to emulate and stage real firmware, emulates existing ldmx software but has LUT structure. + * @brief Tracker made to emulate and stage real firmware, emulates existing + * ldmx software but has LUT structure. * @author Rory O'Dwyer, Stanford University */ @@ -12,10 +13,10 @@ #include "Framework/Event.h" #include "Framework/EventProcessor.h" //Needed to declare processor #include "Recon/Event/EventConstants.h" -#include "TrigScint/TrigScintFirmwareTracker.h" #include "TrigScint/Event/TrigScintHit.h" #include "TrigScint/Event/TrigScintTrack.h" #include "TrigScint/Firmware/objdef.h" +#include "TrigScint/TrigScintFirmwareTracker.h" namespace trigscint { @@ -38,7 +39,6 @@ class TrigScintFirmwareTracker : public framework::Producer { * add a hit at index idx to a cluster */ - private: // collection of clusters produced std::vector digis1_; @@ -49,8 +49,6 @@ class TrigScintFirmwareTracker : public framework::Producer { // collection of clusters produced std::vector digis3_; - - // min threshold for adding a hit to a cluster double minThr_{0.}; @@ -73,11 +71,9 @@ class TrigScintFirmwareTracker : public framework::Producer { std::string digis1_collection_; std::string digis2_collection_; std::string digis3_collection_; - std::vector tracks_; - // specific 
pass name to use for track making std::string passName_{""}; diff --git a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx index 4391cb65a..e3d181836 100755 --- a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx @@ -1,66 +1,77 @@ #include -#include -#include "TrigScint/Firmware/objdef.h" -#include "TrigScint/Firmware/clusterproducer.h" -std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]){ +#include - ap_int<12> SEEDTHR = 30; - ap_int<12> CLUSTHR = 30; +#include "TrigScint/Firmware/clusterproducer.h" +#include "TrigScint/Firmware/objdef.h" - ap_int<12> mapL1[NCHAN]; +std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]) { + ap_int<12> SEEDTHR = 30; + ap_int<12> CLUSTHR = 30; - std::unique_ptr outClus(new Cluster[NCLUS]); + ap_int<12> mapL1[NCHAN]; - for(int i = 0;i outClus(new Cluster[NCLUS]); - //CLEAR THE MAP - for(int i = 0;i-1){ - mapL1[inHit[j].bID]=j; - } - } - //NOW WE JUST LOOK FOR HITS EXCEEDING SEED, IF THEY DO WE PAIR 'EM. 
- for(int k = 0; k-1)){ - if(inHit[mapL1[2*k]].Amp>SEEDTHR){ - clearClus(outClus[k]); - outClus[k].Seed.mID=inHit[mapL1[2*k]].mID; outClus[k].Seed.bID=inHit[mapL1[2*k]].bID; outClus[k].Seed.Amp=inHit[mapL1[2*k]].Amp; outClus[k].Seed.Time=inHit[mapL1[2*k]].Time; - if(mapL1[2*k+1]>-1){ - if(inHit[mapL1[2*k+1]].Amp>CLUSTHR){ - outClus[k].Sec.mID=inHit[mapL1[2*k+1]].mID; outClus[k].Sec.bID=inHit[mapL1[2*k+1]].bID; outClus[k].Sec.Amp=inHit[mapL1[2*k+1]].Amp; outClus[k].Sec.Time=inHit[mapL1[2*k+1]].Time; - doNextCluster=false; - //You can comment this line to turn it into Serialized - clearHit(inHit[mapL1[2*k+1]]); + for (int i = 0; i < NCLUS; ++i) { + clearClus(outClus[i]); + } - } - } - } - } - if((mapL1[2*k+1]>-1)and(doNextCluster)){ - if(inHit[mapL1[2*k+1]].Amp>SEEDTHR){ - clearClus(outClus[k]); - outClus[k].Seed.mID=inHit[mapL1[2*k+1]].mID; outClus[k].Seed.bID=inHit[mapL1[2*k+1]].bID; outClus[k].Seed.Amp=inHit[mapL1[2*k+1]].Amp; outClus[k].Seed.Time=inHit[mapL1[2*k+1]].Time; - if(k-1){ - if(inHit[mapL1[2*k+2]].Amp>CLUSTHR){ - outClus[k].Sec.mID=inHit[mapL1[2*k+2]].mID; outClus[k].Sec.bID=inHit[mapL1[2*k+2]].bID; outClus[k].Sec.Amp=inHit[mapL1[2*k+2]].Amp; outClus[k].Sec.Time=inHit[mapL1[2*k+2]].Time; - //You can comment this line to turn it into Serialized - clearHit(inHit[mapL1[2*k+2]]); - } - } - } - } - } - } + // CLEAR THE MAP + for (int i = 0; i < NCHAN; ++i) { + mapL1[i] = -1; + } + // MAP TO CHANNELS + for (int j = 0; j < NHITS; ++j) { + if (inHit[j].bID > -1) { + mapL1[inHit[j].bID] = j; + } + } + // NOW WE JUST LOOK FOR HITS EXCEEDING SEED, IF THEY DO WE PAIR 'EM. 
+ for (int k = 0; k < NCLUS; ++k) { + bool doNextCluster = true; + if ((mapL1[2 * k] > -1)) { + if (inHit[mapL1[2 * k]].Amp > SEEDTHR) { + clearClus(outClus[k]); + outClus[k].Seed.mID = inHit[mapL1[2 * k]].mID; + outClus[k].Seed.bID = inHit[mapL1[2 * k]].bID; + outClus[k].Seed.Amp = inHit[mapL1[2 * k]].Amp; + outClus[k].Seed.Time = inHit[mapL1[2 * k]].Time; + if (mapL1[2 * k + 1] > -1) { + if (inHit[mapL1[2 * k + 1]].Amp > CLUSTHR) { + outClus[k].Sec.mID = inHit[mapL1[2 * k + 1]].mID; + outClus[k].Sec.bID = inHit[mapL1[2 * k + 1]].bID; + outClus[k].Sec.Amp = inHit[mapL1[2 * k + 1]].Amp; + outClus[k].Sec.Time = inHit[mapL1[2 * k + 1]].Time; + doNextCluster = false; + // You can comment this line to turn it into Serialized + clearHit(inHit[mapL1[2 * k + 1]]); + } + } + } + } + if ((mapL1[2 * k + 1] > -1) and (doNextCluster)) { + if (inHit[mapL1[2 * k + 1]].Amp > SEEDTHR) { + clearClus(outClus[k]); + outClus[k].Seed.mID = inHit[mapL1[2 * k + 1]].mID; + outClus[k].Seed.bID = inHit[mapL1[2 * k + 1]].bID; + outClus[k].Seed.Amp = inHit[mapL1[2 * k + 1]].Amp; + outClus[k].Seed.Time = inHit[mapL1[2 * k + 1]].Time; + if (k < NCLUS - 1) { + if (mapL1[2 * k + 2] > -1) { + if (inHit[mapL1[2 * k + 2]].Amp > CLUSTHR) { + outClus[k].Sec.mID = inHit[mapL1[2 * k + 2]].mID; + outClus[k].Sec.bID = inHit[mapL1[2 * k + 2]].bID; + outClus[k].Sec.Amp = inHit[mapL1[2 * k + 2]].Amp; + outClus[k].Sec.Time = inHit[mapL1[2 * k + 2]].Time; + // You can comment this line to turn it into Serialized + clearHit(inHit[mapL1[2 * k + 2]]); + } + } + } + } + } + } - return outClus; + return outClus; } - diff --git a/TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx b/TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx index 57b6a866a..b894a27cf 100755 --- a/TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx +++ b/TrigScint/src/TrigScint/Firmware/trackproducer_hw.cxx @@ -1,80 +1,159 @@ #include + #include + #include "TrigScint/Firmware/objdef.h" #include "TrigScint/Firmware/trackproducer.h" 
+void trackproducer_hw(Cluster Pad1[NTRK], Cluster Pad2[NCLUS], + Cluster Pad3[NCLUS], Track outTrk[NTRK], + ap_int<12> lookup[NCENT][COMBO][2]) { +#pragma HLS ARRAY_PARTITION variable = Pad1 dim = 0 complete +#pragma HLS ARRAY_PARTITION variable = Pad2 dim = 0 complete +#pragma HLS ARRAY_PARTITION variable = Pad3 dim = 0 complete +#pragma HLS ARRAY_PARTITION variable = outTrk dim = 0 complete +#pragma HLS ARRAY_PARTITION variable = lookup dim = 0 complete +#pragma HLS PIPELINE II = 10 + Track test; +#pragma HLS ARRAY_PARTITION variable = test complete -void trackproducer_hw(Cluster Pad1[NTRK],Cluster Pad2[NCLUS],Cluster Pad3[NCLUS],Track outTrk[NTRK],ap_int<12> lookup[NCENT][COMBO][2]){ - #pragma HLS ARRAY_PARTITION variable=Pad1 dim=0 complete - #pragma HLS ARRAY_PARTITION variable=Pad2 dim=0 complete - #pragma HLS ARRAY_PARTITION variable=Pad3 dim=0 complete - #pragma HLS ARRAY_PARTITION variable=outTrk dim=0 complete - #pragma HLS ARRAY_PARTITION variable=lookup dim=0 complete - #pragma HLS PIPELINE II=10 - Track test; - #pragma HLS ARRAY_PARTITION variable=test complete - - //This firmware module loops over first the Pad1 seeds (NTRK) and then the patterns (COMBO) - //For each seed it check 9 combinations of tracks. These combinations, which depend on alignment - //essentially consist of the clusters that have channels immediattely above or below the Pad1 - //cluster in the first layer, which you may observe from the LUT if you printed it. - //I would only need to check the pattern without all these continue statements, but the continue statements - //further reduce the pattern collection size by only applying certain patterns iff a secondary hit is there - //Thats why this looks complicated at all: the continues just include logic on whether a pattern should - //have a secondary hit. It also checks the track residual, only keeping one pattern for each pad1 cluster. 
- for(int i = 0;i2*NCHAN){continue;} - for(int I = 0;I0)){continue;}//Continue if Seed not Satisfied - ap_int<12> centroid = 2*Pad1[i].Seed.bID; - if(Pad1[i].Sec.Amp>0){ - centroid+=1; - } - cpyCluster(test.Pad1,Pad1[i]); - if((lookup[centroid][I][0]==-1)or(lookup[centroid][I][1]==-1)){continue;}//Pattern Empty - if(not(Pad2[lookup[centroid][I][0]/4].Seed.Amp>0)){continue;}//Continue if Seed not Satisfied - if((lookup[centroid][I][0]%4==0)and((Pad2[lookup[centroid][I][0]/4].Sec.bID>=0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==1))){continue;}//Continue if Sec is not Expected, and not Empty - if((lookup[centroid][I][0]%4==1)and((Pad2[lookup[centroid][I][0]/4].Sec.bID<0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==1))){continue;}//Continue if Sec is Expected, and Empty - if((lookup[centroid][I][0]%4==2)and((Pad2[lookup[centroid][I][0]/4].Sec.bID>=0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==0))){continue;}//Continue if Sec is not Expected, and not Empty - if((lookup[centroid][I][0]%4==3)and((Pad2[lookup[centroid][I][0]/4].Sec.bID<0)or(Pad2[lookup[centroid][I][0]/4].Seed.bID%2==0))){continue;}//Continue if Sec is Expected, and Empty - if(not(Pad3[lookup[centroid][I][1]/4].Seed.Amp>0)){continue;}//Continue if Seed not Satisfied - if((lookup[centroid][I][1]%4==0)and((Pad3[lookup[centroid][I][1]/4].Sec.bID>=0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==1))){continue;}//Continue if Sec is not Expected, and not Empty - if((lookup[centroid][I][1]%4==1)and((Pad3[lookup[centroid][I][1]/4].Sec.bID<0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==1))){continue;}//Continue if Sec is Expected, and Empty - if((lookup[centroid][I][1]%4==2)and((Pad3[lookup[centroid][I][1]/4].Sec.bID>=0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==0))){continue;}//Continue if Sec is not Expected, and not Empty - if((lookup[centroid][I][1]%4==3)and((Pad3[lookup[centroid][I][1]/4].Sec.bID<0)or(Pad3[lookup[centroid][I][1]/4].Seed.bID%2==0))){continue;}//Continue if Sec is Expected, and Empty - 
cpyCluster(test.Pad2,Pad2[lookup[centroid][I][0]/4]); - cpyCluster(test.Pad3,Pad3[lookup[centroid][I][1]/4]); - calcResid(test); - if(test.resid=0)){ - if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} - } - if((outTrk[i].Pad2.Seed.bID==outTrk[i+1].Pad2.Seed.bID)and(outTrk[i+1].Pad2.Seed.bID>=0)){ - if(outTrk[i+1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i+1]);} - } - if((outTrk[i-1].Pad2.Seed.bID==outTrk[i+1].Pad2.Seed.bID)and(outTrk[i+1].Pad2.Seed.bID>=0)){ - if(outTrk[i-1].resid<=outTrk[i+1].resid){clearTrack(outTrk[i+1]);}else{clearTrack(outTrk[i-1]);} - } - } - for(int i = 1;i=0)){ - if(outTrk[i-1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i-1]);} - } - if((outTrk[i].Pad3.Seed.bID==outTrk[i+1].Pad3.Seed.bID)and(outTrk[i+1].Pad3.Seed.bID>=0)){ - if(outTrk[i+1].resid<=outTrk[i].resid){clearTrack(outTrk[i]);}else{clearTrack(outTrk[i+1]);} - } - if((outTrk[i-1].Pad3.Seed.bID==outTrk[i+1].Pad3.Seed.bID)and(outTrk[i+1].Pad3.Seed.bID>=0)){ - if(outTrk[i-1].resid<=outTrk[i+1].resid){clearTrack(outTrk[i+1]);}else{clearTrack(outTrk[i-1]);} - } - } - return; + // This firmware module loops over first the Pad1 seeds (NTRK) and then the + // patterns (COMBO) For each seed it check 9 combinations of tracks. These + // combinations, which depend on alignment essentially consist of the clusters + // that have channels immediattely above or below the Pad1 cluster in the + // first layer, which you may observe from the LUT if you printed it. I would + // only need to check the pattern without all these continue statements, but + // the continue statements further reduce the pattern collection size by only + // applying certain patterns iff a secondary hit is there Thats why this looks + // complicated at all: the continues just include logic on whether a pattern + // should have a secondary hit. 
It also checks the track residual, only + // keeping one pattern for each pad1 cluster. + for (int i = 0; i < NTRK; i++) { + if (2 * Pad1[i].Seed.bID > 2 * NCHAN) { + continue; + } + for (int I = 0; I < COMBO; I++) { + clearTrack(test); + if (not(Pad1[i].Seed.Amp > 0)) { + continue; + } // Continue if Seed not Satisfied + ap_int<12> centroid = 2 * Pad1[i].Seed.bID; + if (Pad1[i].Sec.Amp > 0) { + centroid += 1; + } + cpyCluster(test.Pad1, Pad1[i]); + if ((lookup[centroid][I][0] == -1) or (lookup[centroid][I][1] == -1)) { + continue; + } // Pattern Empty + if (not(Pad2[lookup[centroid][I][0] / 4].Seed.Amp > 0)) { + continue; + } // Continue if Seed not Satisfied + if ((lookup[centroid][I][0] % 4 == 0) and + ((Pad2[lookup[centroid][I][0] / 4].Sec.bID >= 0) or + (Pad2[lookup[centroid][I][0] / 4].Seed.bID % 2 == 1))) { + continue; + } // Continue if Sec is not Expected, and not Empty + if ((lookup[centroid][I][0] % 4 == 1) and + ((Pad2[lookup[centroid][I][0] / 4].Sec.bID < 0) or + (Pad2[lookup[centroid][I][0] / 4].Seed.bID % 2 == 1))) { + continue; + } // Continue if Sec is Expected, and Empty + if ((lookup[centroid][I][0] % 4 == 2) and + ((Pad2[lookup[centroid][I][0] / 4].Sec.bID >= 0) or + (Pad2[lookup[centroid][I][0] / 4].Seed.bID % 2 == 0))) { + continue; + } // Continue if Sec is not Expected, and not Empty + if ((lookup[centroid][I][0] % 4 == 3) and + ((Pad2[lookup[centroid][I][0] / 4].Sec.bID < 0) or + (Pad2[lookup[centroid][I][0] / 4].Seed.bID % 2 == 0))) { + continue; + } // Continue if Sec is Expected, and Empty + if (not(Pad3[lookup[centroid][I][1] / 4].Seed.Amp > 0)) { + continue; + } // Continue if Seed not Satisfied + if ((lookup[centroid][I][1] % 4 == 0) and + ((Pad3[lookup[centroid][I][1] / 4].Sec.bID >= 0) or + (Pad3[lookup[centroid][I][1] / 4].Seed.bID % 2 == 1))) { + continue; + } // Continue if Sec is not Expected, and not Empty + if ((lookup[centroid][I][1] % 4 == 1) and + ((Pad3[lookup[centroid][I][1] / 4].Sec.bID < 0) or + 
(Pad3[lookup[centroid][I][1] / 4].Seed.bID % 2 == 1))) { + continue; + } // Continue if Sec is Expected, and Empty + if ((lookup[centroid][I][1] % 4 == 2) and + ((Pad3[lookup[centroid][I][1] / 4].Sec.bID >= 0) or + (Pad3[lookup[centroid][I][1] / 4].Seed.bID % 2 == 0))) { + continue; + } // Continue if Sec is not Expected, and not Empty + if ((lookup[centroid][I][1] % 4 == 3) and + ((Pad3[lookup[centroid][I][1] / 4].Sec.bID < 0) or + (Pad3[lookup[centroid][I][1] / 4].Seed.bID % 2 == 0))) { + continue; + } // Continue if Sec is Expected, and Empty + cpyCluster(test.Pad2, Pad2[lookup[centroid][I][0] / 4]); + cpyCluster(test.Pad3, Pad3[lookup[centroid][I][1] / 4]); + calcResid(test); + if (test.resid < outTrk[i].resid) { + cpyTrack(outTrk[i], test); + } + } + } + // While we ultimately envision having the firmware do duplicate track removal + // in the other two layers in a separate firmware module, they are done here + // so as to not have track over counting and to validate the processor. Thats + // what occurs here below. 
+ for (int i = 1; i < NTRK - 1; i++) { + if ((outTrk[i - 1].Pad2.Seed.bID == outTrk[i].Pad2.Seed.bID) and + (outTrk[i].Pad2.Seed.bID >= 0)) { + if (outTrk[i - 1].resid <= outTrk[i].resid) { + clearTrack(outTrk[i]); + } else { + clearTrack(outTrk[i - 1]); + } + } + if ((outTrk[i].Pad2.Seed.bID == outTrk[i + 1].Pad2.Seed.bID) and + (outTrk[i + 1].Pad2.Seed.bID >= 0)) { + if (outTrk[i + 1].resid <= outTrk[i].resid) { + clearTrack(outTrk[i]); + } else { + clearTrack(outTrk[i + 1]); + } + } + if ((outTrk[i - 1].Pad2.Seed.bID == outTrk[i + 1].Pad2.Seed.bID) and + (outTrk[i + 1].Pad2.Seed.bID >= 0)) { + if (outTrk[i - 1].resid <= outTrk[i + 1].resid) { + clearTrack(outTrk[i + 1]); + } else { + clearTrack(outTrk[i - 1]); + } + } + } + for (int i = 1; i < NTRK - 1; i++) { + if ((outTrk[i - 1].Pad3.Seed.bID == outTrk[i].Pad3.Seed.bID) and + (outTrk[i].Pad3.Seed.bID >= 0)) { + if (outTrk[i - 1].resid <= outTrk[i].resid) { + clearTrack(outTrk[i]); + } else { + clearTrack(outTrk[i - 1]); + } + } + if ((outTrk[i].Pad3.Seed.bID == outTrk[i + 1].Pad3.Seed.bID) and + (outTrk[i + 1].Pad3.Seed.bID >= 0)) { + if (outTrk[i + 1].resid <= outTrk[i].resid) { + clearTrack(outTrk[i]); + } else { + clearTrack(outTrk[i + 1]); + } + } + if ((outTrk[i - 1].Pad3.Seed.bID == outTrk[i + 1].Pad3.Seed.bID) and + (outTrk[i + 1].Pad3.Seed.bID >= 0)) { + if (outTrk[i - 1].resid <= outTrk[i + 1].resid) { + clearTrack(outTrk[i + 1]); + } else { + clearTrack(outTrk[i - 1]); + } + } + } + return; } diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 8d4f1e345..4d167ce98 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -1,10 +1,12 @@ #include "TrigScint/TrigScintFirmwareTracker.h" -#include "TrigScint/Firmware/trackproducer.h" + +#include +#include + #include "TrigScint/Firmware/clusterproducer.h" #include "TrigScint/Firmware/objdef.h" -#include -#include 
+#include "TrigScint/Firmware/trackproducer.h" namespace trigscint { @@ -35,44 +37,51 @@ void TrigScintFirmwareTracker::configure(framework::config::Parameters &ps) { } void TrigScintFirmwareTracker::produce(framework::Event &event) { - //This processor takes in TS digis and outputs a track collection. It does so using clusterproducer_sw - //and trackproducer_hw, which are validated pieces of HLS code (though clusterproducer_sw has had its - //instances of pragmas excluded. I will comment on how clusterproducer and trackproducer work more - //thouroughly in them respectively, but generally the clusterproducer makes only two hit clusters (as - //ready that was all that was made from the original sw) and does so by making a digi map and running - //along channels numerically and pairing if possible. The trackproducer takes a LOOKUP array as a LUT - //and does track pattern mathcing. This depends on alignment through the A vector below. + // This processor takes in TS digis and outputs a track collection. It does so + // using clusterproducer_sw and trackproducer_hw, which are validated pieces + // of HLS code (though clusterproducer_sw has had its instances of pragmas + // excluded. I will comment on how clusterproducer and trackproducer work more + // thouroughly in them respectively, but generally the clusterproducer makes + // only two hit clusters (as ready that was all that was made from the + // original sw) and does so by making a digi map and running along channels + // numerically and pairing if possible. The trackproducer takes a LOOKUP array + // as a LUT and does track pattern mathcing. This depends on alignment through + // the A vector below. if (verbose_) { ldmx_log(debug) << "TrigScintFirmwareTracker: produce() starts! Event number: " << event.getEventHeader().getEventNumber(); } - ap_int<12> A[3]={0,0,0}; + ap_int<12> A[3] = {0, 0, 0}; ap_int<12> LOOKUP[NCENT][COMBO][2]; - - //This line fills in the LOOKUP table used for patter matching latter. 
The array takes in as its first - //argument the centroid of a first pad cluster, then the next two take on which track pattern (of ~9) - //we are matching to and the last if we are matching to a cluster with two hits - for(int i = 0; i=0)and(LOOKUP[i][j][1]>=0)and(LOOKUP[i][j][0]= 0) and (LOOKUP[i][j][1] >= 0) and + (LOOKUP[i][j][0] < NCENT) and (LOOKUP[i][j][1] < NCENT))) { + LOOKUP[i][j][0] = -1; + LOOKUP[i][j][1] = -1; } } } - //Here we instantiate arrays necessary to do the rest of it. + // Here we instantiate arrays necessary to do the rest of it. Hit HPad1[NHITS]; Hit HPad2[NHITS]; Hit HPad3[NHITS]; @@ -81,167 +90,190 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { Cluster Pad2[NCLUS]; Cluster Pad3[NCLUS]; Track outTrk[NTRK]; - - for(int j = 0; j(digis1_collection_, passName_)}; + event.getCollection(digis1_collection_, passName_)}; const auto digis3_{ - event.getCollection(digis2_collection_, passName_)}; + event.getCollection(digis2_collection_, passName_)}; const auto digis2_{ - event.getCollection(digis3_collection_, passName_)}; - + event.getCollection(digis3_collection_, passName_)}; + if (verbose_) { ldmx_log(debug) << "Got digi collection " << digis1_collection_ << "_" << passName_ << " with " << digis1_.size() << " entries "; } - - //The next collection of things fill in the firmware hit objects from reading in the - //digi collections the necessary information. The firmware hit objects only keep - //bID,mID,Time, and PE count. + + // The next collection of things fill in the firmware hit objects from reading + // in the digi collections the necessary information. The firmware hit objects + // only keep bID,mID,Time, and PE count. 
int occupied[NCHAN]; - for(int i = 0; i - minThr_)and(digi.getBarID()<=NCHAN)and(digi.getBarID()>=0)){ + if ((digi.getPE() > minThr_) and (digi.getBarID() <= NCHAN) and + (digi.getBarID() >= 0)) { ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); - int index=count; - if(occupied[(int)digi.getBarID()]>=0){ - if(HPad1[(int)occupied[(int)digi.getBarID()]].Amp)(digi.getBarID()); - HPad1[(int)occupied[(int)digi.getBarID()]].mID=(ap_int<12>)(digi.getModuleID()); - HPad1[(int)occupied[(int)digi.getBarID()]].Amp=(ap_int<12>)(digi.getPE()); - HPad1[(int)occupied[(int)digi.getBarID()]].Time=(ap_int<12>)(digi.getTime()); - } - }else{ - HPad1[count].bID=(ap_int<12>)(digi.getBarID()); - HPad1[count].mID=(ap_int<12>)(digi.getModuleID()); - HPad1[count].Amp=(ap_int<12>)(digi.getPE()); - HPad1[count].Time=(ap_int<12>)(digi.getTime()); - occupied[digi.getBarID()]=count; - count++; + int index = count; + if (occupied[(int)digi.getBarID()] >= 0) { + if (HPad1[(int)occupied[(int)digi.getBarID()]].Amp < digi.getPE()) { + HPad1[(int)occupied[(int)digi.getBarID()]].bID = + (ap_int<12>)(digi.getBarID()); + HPad1[(int)occupied[(int)digi.getBarID()]].mID = + (ap_int<12>)(digi.getModuleID()); + HPad1[(int)occupied[(int)digi.getBarID()]].Amp = + (ap_int<12>)(digi.getPE()); + HPad1[(int)occupied[(int)digi.getBarID()]].Time = + (ap_int<12>)(digi.getTime()); + } + } else { + HPad1[count].bID = (ap_int<12>)(digi.getBarID()); + HPad1[count].mID = (ap_int<12>)(digi.getModuleID()); + HPad1[count].Amp = (ap_int<12>)(digi.getPE()); + HPad1[count].Time = (ap_int<12>)(digi.getTime()); + occupied[digi.getBarID()] = count; + count++; } } } - for(int i = 0; i - minThr_)and(digi.getBarID()<=NCHAN)and(digi.getBarID()>=0)){ + if ((digi.getPE() > minThr_) and (digi.getBarID() <= NCHAN) and + (digi.getBarID() >= 0)) { ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); - int index=count; - if(occupied[(int)digi.getBarID()]>=0){ 
- if(HPad2[(int)occupied[(int)digi.getBarID()]].Amp)(digi.getBarID()); - HPad2[(int)occupied[(int)digi.getBarID()]].mID=(ap_int<12>)(digi.getModuleID()); - HPad2[(int)occupied[(int)digi.getBarID()]].Amp=(ap_int<12>)(digi.getPE()); - HPad2[(int)occupied[(int)digi.getBarID()]].Time=(ap_int<12>)(digi.getTime()); - } - }else{ - HPad2[count].bID=(ap_int<12>)(digi.getBarID()); - HPad2[count].mID=(ap_int<12>)(digi.getModuleID()); - HPad2[count].Amp=(ap_int<12>)(digi.getPE()); - HPad2[count].Time=(ap_int<12>)(digi.getTime()); - occupied[digi.getBarID()]=count; - count++; + int index = count; + if (occupied[(int)digi.getBarID()] >= 0) { + if (HPad2[(int)occupied[(int)digi.getBarID()]].Amp < digi.getPE()) { + HPad2[(int)occupied[(int)digi.getBarID()]].bID = + (ap_int<12>)(digi.getBarID()); + HPad2[(int)occupied[(int)digi.getBarID()]].mID = + (ap_int<12>)(digi.getModuleID()); + HPad2[(int)occupied[(int)digi.getBarID()]].Amp = + (ap_int<12>)(digi.getPE()); + HPad2[(int)occupied[(int)digi.getBarID()]].Time = + (ap_int<12>)(digi.getTime()); + } + } else { + HPad2[count].bID = (ap_int<12>)(digi.getBarID()); + HPad2[count].mID = (ap_int<12>)(digi.getModuleID()); + HPad2[count].Amp = (ap_int<12>)(digi.getPE()); + HPad2[count].Time = (ap_int<12>)(digi.getTime()); + occupied[digi.getBarID()] = count; + count++; } } } - for(int i = 0; i - minThr_)and(digi.getBarID()<=NCHAN)and(digi.getBarID()>=0)){ + if ((digi.getPE() > minThr_) and (digi.getBarID() <= NCHAN) and + (digi.getBarID() >= 0)) { ap_int<12> bID = (ap_int<12>)(digi.getBarID()); ap_int<12> Amp = (ap_int<12>)(digi.getPE()); - int index=count; - if(occupied[(int)digi.getBarID()]>=0){ - if(HPad3[(int)occupied[(int)digi.getBarID()]].Amp)(digi.getBarID()); - HPad3[(int)occupied[(int)digi.getBarID()]].mID=(ap_int<12>)(digi.getModuleID()); - HPad3[(int)occupied[(int)digi.getBarID()]].Amp=(ap_int<12>)(digi.getPE()); - HPad3[(int)occupied[(int)digi.getBarID()]].Time=(ap_int<12>)(digi.getTime()); - } - }else{ - 
HPad3[count].bID=(ap_int<12>)(digi.getBarID()); - HPad3[count].mID=(ap_int<12>)(digi.getModuleID()); - HPad3[count].Amp=(ap_int<12>)(digi.getPE()); - HPad3[count].Time=(ap_int<12>)(digi.getTime()); - occupied[digi.getBarID()]=count; - count++; + int index = count; + if (occupied[(int)digi.getBarID()] >= 0) { + if (HPad3[(int)occupied[(int)digi.getBarID()]].Amp < digi.getPE()) { + HPad3[(int)occupied[(int)digi.getBarID()]].bID = + (ap_int<12>)(digi.getBarID()); + HPad3[(int)occupied[(int)digi.getBarID()]].mID = + (ap_int<12>)(digi.getModuleID()); + HPad3[(int)occupied[(int)digi.getBarID()]].Amp = + (ap_int<12>)(digi.getPE()); + HPad3[(int)occupied[(int)digi.getBarID()]].Time = + (ap_int<12>)(digi.getTime()); + } + } else { + HPad3[count].bID = (ap_int<12>)(digi.getBarID()); + HPad3[count].mID = (ap_int<12>)(digi.getModuleID()); + HPad3[count].Amp = (ap_int<12>)(digi.getPE()); + HPad3[count].Time = (ap_int<12>)(digi.getTime()); + occupied[digi.getBarID()] = count; + count++; } } } - count=0; - //These next lines here calls clusterproducer_sw(HPad1), which is just the validated firmware module. Since ap_* class - //is messy, I had to do some post-call cleanup before looping over the clusters and putting them into Point i - //which is feed into track producer - int counterN=0; - std::unique_ptr Point1=clusterproducer_sw(HPad1); - int topSeed=0; - for(int i = 0; i30)and(Point1[i].Seed.bID<(NCHAN+1))and(Point1[i].Seed.bID>=0)and(Point1[i].Sec.Amp<450)and(counterN=topSeed){ - cpyHit(Pad1[counterN].Seed,Point1[i].Seed);cpyHit(Pad1[counterN].Sec,Point1[i].Sec); - calcCent(Pad1[counterN]); - counterN++; - topSeed=Point1[i].Seed.bID; + count = 0; + // These next lines here calls clusterproducer_sw(HPad1), which is just the + // validated firmware module. 
Since ap_* class is messy, I had to do some + // post-call cleanup before looping over the clusters and putting them into + // Point i which is feed into track producer + int counterN = 0; + std::unique_ptr Point1 = clusterproducer_sw(HPad1); + int topSeed = 0; + for (int i = 0; i < NCLUS; i++) { + if ((Point1[i].Seed.Amp < 450) and (Point1[i].Seed.Amp > 30) and + (Point1[i].Seed.bID < (NCHAN + 1)) and (Point1[i].Seed.bID >= 0) and + (Point1[i].Sec.Amp < 450) and (counterN < NTRK)) { + if (Point1[i].Seed.bID >= topSeed) { + cpyHit(Pad1[counterN].Seed, Point1[i].Seed); + cpyHit(Pad1[counterN].Sec, Point1[i].Sec); + calcCent(Pad1[counterN]); + counterN++; + topSeed = Point1[i].Seed.bID; } } } - std::unique_ptr Point2=clusterproducer_sw(HPad2); - topSeed=0; - for(int i = 0; i30)and(Point2[i].Seed.bID<(NCHAN+1))and(Point2[i].Seed.bID>=0)and(Point2[i].Sec.Amp<450)){ - if(Point2[i].Seed.bID>=topSeed){ - cpyHit(Pad2[i].Seed,Point2[i].Seed);cpyHit(Pad2[i].Sec,Point2[i].Sec); - calcCent(Pad2[i]); - topSeed=Point2[i].Seed.bID; + std::unique_ptr Point2 = clusterproducer_sw(HPad2); + topSeed = 0; + for (int i = 0; i < NCLUS; i++) { + if ((Point2[i].Seed.Amp < 450) and (Point2[i].Seed.Amp > 30) and + (Point2[i].Seed.bID < (NCHAN + 1)) and (Point2[i].Seed.bID >= 0) and + (Point2[i].Sec.Amp < 450)) { + if (Point2[i].Seed.bID >= topSeed) { + cpyHit(Pad2[i].Seed, Point2[i].Seed); + cpyHit(Pad2[i].Sec, Point2[i].Sec); + calcCent(Pad2[i]); + topSeed = Point2[i].Seed.bID; } } } - std::unique_ptr Point3=clusterproducer_sw(HPad3); - topSeed=0; - for(int i = 0; i30)and(Point3[i].Seed.bID<(NCHAN+1))and(Point3[i].Seed.bID>=0)and(Point3[i].Sec.Amp<450)){ - if(Point3[i].Seed.bID>=topSeed){ - cpyHit(Pad3[i].Seed,Point3[i].Seed);cpyHit(Pad3[i].Sec,Point3[i].Sec); - calcCent(Pad3[i]); - topSeed=Point3[i].Seed.bID; + std::unique_ptr Point3 = clusterproducer_sw(HPad3); + topSeed = 0; + for (int i = 0; i < NCLUS; i++) { + if ((Point3[i].Seed.Amp < 450) and (Point3[i].Seed.Amp > 30) and + 
(Point3[i].Seed.bID < (NCHAN + 1)) and (Point3[i].Seed.bID >= 0) and + (Point3[i].Sec.Amp < 450)) { + if (Point3[i].Seed.bID >= topSeed) { + cpyHit(Pad3[i].Seed, Point3[i].Seed); + cpyHit(Pad3[i].Sec, Point3[i].Sec); + calcCent(Pad3[i]); + topSeed = Point3[i].Seed.bID; } } } - //I have stagged the digis into firmware digi objects and paired them into firmware cluster objects, so - //at this point I can insert them and the LUT into the trackproducer_hw to create the track collection - //I use makeTrack to revert the firmware track object back into a regular track object for analysis - //purposes - trackproducer_hw(Pad1,Pad2,Pad3,outTrk,LOOKUP); - for(int I = 0; I0){ + // I have stagged the digis into firmware digi objects and paired them into + // firmware cluster objects, so at this point I can insert them and the LUT + // into the trackproducer_hw to create the track collection I use makeTrack to + // revert the firmware track object back into a regular track object for + // analysis purposes + trackproducer_hw(Pad1, Pad2, Pad3, outTrk, LOOKUP); + for (int I = 0; I < NTRK; I++) { + if (outTrk[I].Pad1.Seed.Amp > 0) { ldmx::TrigScintTrack trk = makeTrack(outTrk[I]); tracks_.push_back(trk); } @@ -253,12 +285,13 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } ldmx::TrigScintTrack TrigScintFirmwareTracker::makeTrack(Track outTrk) { - //This takes a firmware track object and reverts it into an ldmx track object, unfortunately only - //retaining that information of the track that is retained in the firmware track. + // This takes a firmware track object and reverts it into an ldmx track + // object, unfortunately only retaining that information of the track that is + // retained in the firmware track. 
ldmx::TrigScintTrack tr; - float pe = outTrk.Pad1.Seed.Amp+outTrk.Pad1.Sec.Amp; - pe += outTrk.Pad2.Seed.Amp+outTrk.Pad2.Sec.Amp; - pe += outTrk.Pad3.Seed.Amp+outTrk.Pad3.Sec.Amp; + float pe = outTrk.Pad1.Seed.Amp + outTrk.Pad1.Sec.Amp; + pe += outTrk.Pad2.Seed.Amp + outTrk.Pad2.Sec.Amp; + pe += outTrk.Pad3.Seed.Amp + outTrk.Pad3.Sec.Amp; tr.setCentroid(calcTCent(outTrk)); calcResid(outTrk); tr.setPE(pe); From 04f3d9bf2f328e39b5c9fc69ea45a9fcc229e9d3 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Wed, 18 Sep 2024 08:56:48 -0700 Subject: [PATCH 15/19] Reverting to old ldmx-env.sh --- scripts/ldmx-env.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/ldmx-env.sh b/scripts/ldmx-env.sh index f374afd79..aae7a3f4f 100644 --- a/scripts/ldmx-env.sh +++ b/scripts/ldmx-env.sh @@ -198,6 +198,7 @@ if hash docker &> /dev/null; then -e LDMX_BASE \ -e DISPLAY=${LDMX_CONTAINER_DISPLAY}:0 \ $_envs \ + -v /tmp/.X11-unix:/tmp/.X11-unix \ $_mounts \ -u $(id -u ${USER}):$(id -g ${USER}) \ $LDMX_DOCKER_TAG "$@" @@ -265,6 +266,7 @@ elif hash singularity &> /dev/null; then # Run the container __ldmx_run() { + local csv_list="/tmp/.X11-unix" for dir_to_mount in "${LDMX_CONTAINER_MOUNTS[@]}"; do csv_list="$dir_to_mount,$csv_list" done From 71fd2aa918ba8a97e67bb6c36912b6960f413747 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Wed, 18 Sep 2024 11:04:40 -0700 Subject: [PATCH 16/19] Changing to array structure, haven't validated this yes please don't push just now. 
--- TrigScint/include/TrigScint/Firmware/clusterproducer.h | 2 +- TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx | 2 +- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/TrigScint/include/TrigScint/Firmware/clusterproducer.h b/TrigScint/include/TrigScint/Firmware/clusterproducer.h index 39e5d3925..76f07be51 100755 --- a/TrigScint/include/TrigScint/Firmware/clusterproducer.h +++ b/TrigScint/include/TrigScint/Firmware/clusterproducer.h @@ -6,7 +6,7 @@ void copyHit1(Hit One, Hit Two); void copyHit2(Hit One, Hit Two); void clusterproducer_ref(Hit inHit[NHITS], Cluster outClus[NCLUS]); -std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]); +std::array clusterproducer_sw(Hit inHit[NHITS]); void clusterproducer_hw(Hit inHit[NHITS], Cluster outClus[NCLUS]); #endif diff --git a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx index e3d181836..99f811bb0 100755 --- a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx @@ -5,7 +5,7 @@ #include "TrigScint/Firmware/clusterproducer.h" #include "TrigScint/Firmware/objdef.h" -std::unique_ptr clusterproducer_sw(Hit inHit[NHITS]) { +std::array clusterproducer_sw(Hit inHit[NHITS]) { ap_int<12> SEEDTHR = 30; ap_int<12> CLUSTHR = 30; diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 4d167ce98..e0d0768a8 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -223,7 +223,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { // post-call cleanup before looping over the clusters and putting them into // Point i which is feed into track producer int counterN = 0; - std::unique_ptr Point1 = clusterproducer_sw(HPad1); + std::array Point1 = clusterproducer_sw(HPad1); 
int topSeed = 0; for (int i = 0; i < NCLUS; i++) { if ((Point1[i].Seed.Amp < 450) and (Point1[i].Seed.Amp > 30) and @@ -238,7 +238,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - std::unique_ptr Point2 = clusterproducer_sw(HPad2); + std::array Point2 = clusterproducer_sw(HPad2); topSeed = 0; for (int i = 0; i < NCLUS; i++) { if ((Point2[i].Seed.Amp < 450) and (Point2[i].Seed.Amp > 30) and @@ -252,7 +252,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - std::unique_ptr Point3 = clusterproducer_sw(HPad3); + std::array Point3 = clusterproducer_sw(HPad3); topSeed = 0; for (int i = 0; i < NCLUS; i++) { if ((Point3[i].Seed.Amp < 450) and (Point3[i].Seed.Amp > 30) and From ee51194b03e282342560c5a252cd9218d0186877 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Sep 2024 18:06:57 +0000 Subject: [PATCH 17/19] Apply clang-format --- TrigScint/include/TrigScint/Firmware/clusterproducer.h | 2 +- TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx | 2 +- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) mode change 100755 => 100644 TrigScint/include/TrigScint/Firmware/clusterproducer.h mode change 100755 => 100644 TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx diff --git a/TrigScint/include/TrigScint/Firmware/clusterproducer.h b/TrigScint/include/TrigScint/Firmware/clusterproducer.h old mode 100755 new mode 100644 index 76f07be51..94fd1ccd6 --- a/TrigScint/include/TrigScint/Firmware/clusterproducer.h +++ b/TrigScint/include/TrigScint/Firmware/clusterproducer.h @@ -6,7 +6,7 @@ void copyHit1(Hit One, Hit Two); void copyHit2(Hit One, Hit Two); void clusterproducer_ref(Hit inHit[NHITS], Cluster outClus[NCLUS]); -std::array clusterproducer_sw(Hit inHit[NHITS]); +std::array clusterproducer_sw(Hit inHit[NHITS]); void clusterproducer_hw(Hit inHit[NHITS], Cluster outClus[NCLUS]); #endif diff --git 
a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx old mode 100755 new mode 100644 index 99f811bb0..516d86fa2 --- a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx @@ -5,7 +5,7 @@ #include "TrigScint/Firmware/clusterproducer.h" #include "TrigScint/Firmware/objdef.h" -std::array clusterproducer_sw(Hit inHit[NHITS]) { +std::array clusterproducer_sw(Hit inHit[NHITS]) { ap_int<12> SEEDTHR = 30; ap_int<12> CLUSTHR = 30; diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index e0d0768a8..8d25e308c 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -223,7 +223,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { // post-call cleanup before looping over the clusters and putting them into // Point i which is feed into track producer int counterN = 0; - std::array Point1 = clusterproducer_sw(HPad1); + std::array Point1 = clusterproducer_sw(HPad1); int topSeed = 0; for (int i = 0; i < NCLUS; i++) { if ((Point1[i].Seed.Amp < 450) and (Point1[i].Seed.Amp > 30) and @@ -238,7 +238,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - std::array Point2 = clusterproducer_sw(HPad2); + std::array Point2 = clusterproducer_sw(HPad2); topSeed = 0; for (int i = 0; i < NCLUS; i++) { if ((Point2[i].Seed.Amp < 450) and (Point2[i].Seed.Amp > 30) and @@ -252,7 +252,7 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { } } } - std::array Point3 = clusterproducer_sw(HPad3); + std::array Point3 = clusterproducer_sw(HPad3); topSeed = 0; for (int i = 0; i < NCLUS; i++) { if ((Point3[i].Seed.Amp < 450) and (Point3[i].Seed.Amp > 30) and From 892b19f4aa828c83ba5aaac14968c74b24c042e5 Mon Sep 17 00:00:00 2001 From: rodwyer100 Date: Thu, 19 Sep 2024 15:04:08 -0700 
Subject: [PATCH 18/19] yay! all done --- TrigScint/CMakeLists.txt | 4 +++- TrigScint/exampleConfigs/firmwareEx.py | 2 +- TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx | 2 +- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 5 +++++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/TrigScint/CMakeLists.txt b/TrigScint/CMakeLists.txt index 33c12f323..6344dccc2 100644 --- a/TrigScint/CMakeLists.txt +++ b/TrigScint/CMakeLists.txt @@ -40,10 +40,12 @@ if(BUILD_EVENT_ONLY) endif() +setup_library(module TrigScint name Firmware) +target_include_directories(TrigScint_Firmware PUBLIC ../Trigger/HLS_arbitrary_Precision_Types/include) setup_library(module TrigScint dependencies Framework::Framework Recon::Event DetDescr::DetDescr Tools::Tools SimCore::Event + Tools::Tools SimCore::Event TrigScint::Firmware ) -target_include_directories(TrigScint PUBLIC ../Trigger/HLS_arbitrary_Precision_Types/include) setup_python(package_name LDMX/TrigScint) diff --git a/TrigScint/exampleConfigs/firmwareEx.py b/TrigScint/exampleConfigs/firmwareEx.py index c8e41ab18..fc9d4a93d 100644 --- a/TrigScint/exampleConfigs/firmwareEx.py +++ b/TrigScint/exampleConfigs/firmwareEx.py @@ -162,7 +162,7 @@ logEvents = p.maxEvents p.logFrequency = int( p.maxEvents/logEvents ) -json.dumps(p.parameterDump(), indent=2) +#json.dumps(p.parameterDump(), indent=2) with open('parameterDump.json', 'w') as outfile: json.dump(p.parameterDump(), outfile, indent=4) diff --git a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx index 516d86fa2..495c63ef3 100644 --- a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx @@ -11,7 +11,7 @@ std::array clusterproducer_sw(Hit inHit[NHITS]) { ap_int<12> mapL1[NCHAN]; - std::unique_ptr outClus(new Cluster[NCLUS]); + std::array outClus; for (int i = 0; i < NCLUS; ++i) { clearClus(outClus[i]); diff --git 
a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 8d25e308c..3f40dd604 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -271,6 +271,11 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { // into the trackproducer_hw to create the track collection I use makeTrack to // revert the firmware track object back into a regular track object for // analysis purposes + // + // NOTE: Pad1 has NTRK instead of NCLUS clusters for a reason: the firmware cannot + // facilitate NCLUS many tracks within its alloted bandwidth , we have to put a cut + // on them which is facilitated by a cut on the number of clusters in Pad1. Do + // not change this. trackproducer_hw(Pad1, Pad2, Pad3, outTrk, LOOKUP); for (int I = 0; I < NTRK; I++) { if (outTrk[I].Pad1.Seed.Amp > 0) { From 5598eaa3a4129dda761cf8375b1e948cdc76a6d3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 19 Sep 2024 22:09:55 +0000 Subject: [PATCH 19/19] Apply clang-format --- TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx | 2 +- TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx index 495c63ef3..140011d93 100644 --- a/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx +++ b/TrigScint/src/TrigScint/Firmware/clusterproducer_sw.cxx @@ -11,7 +11,7 @@ std::array clusterproducer_sw(Hit inHit[NHITS]) { ap_int<12> mapL1[NCHAN]; - std::array outClus; + std::array outClus; for (int i = 0; i < NCLUS; ++i) { clearClus(outClus[i]); diff --git a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx index 3f40dd604..6af83a407 100644 --- a/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx +++ 
b/TrigScint/src/TrigScint/TrigScintFirmwareTracker.cxx @@ -272,10 +272,10 @@ void TrigScintFirmwareTracker::produce(framework::Event &event) { // revert the firmware track object back into a regular track object for // analysis purposes // - // NOTE: Pad1 has NTRK instead of NCLUS clusters for a reason: the firmware cannot - // facilitate NCLUS many tracks within its alloted bandwidth , we have to put a cut - // on them which is facilitated by a cut on the number of clusters in Pad1. Do - // not change this. + // NOTE: Pad1 has NTRK instead of NCLUS clusters for a reason: the firmware + // cannot facilitate NCLUS many tracks within its alloted bandwidth , we have + // to put a cut on them which is facilitated by a cut on the number of + // clusters in Pad1. Do not change this. trackproducer_hw(Pad1, Pad2, Pad3, outTrk, LOOKUP); for (int I = 0; I < NTRK; I++) { if (outTrk[I].Pad1.Seed.Amp > 0) {