From 5615d8d3b89f1b11d90cc3225a2703d7e2f3e8e9 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 12:25:17 +0100 Subject: [PATCH 1/5] [custom op]: set output datatype MVAU given no activation function --- src/finn/custom_op/fpgadataflow/matrixvectoractivation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py index 204a41e21c..b125745708 100644 --- a/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py @@ -664,6 +664,8 @@ def minimize_accumulator_width(self, model): # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) + if self.get_nodeattr("noActivation"): + self.set_nodeattr("outputDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] def minimize_weight_bit_width(self, model): From 153c2d4e8f15bfab81d6dca4261fee72739419b8 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 12:25:43 +0100 Subject: [PATCH 2/5] [custom op]: update tensor datatype for consistency --- src/finn/custom_op/fpgadataflow/thresholding_batch.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/thresholding_batch.py b/src/finn/custom_op/fpgadataflow/thresholding_batch.py index 3bcc5c05cf..72ee2f7af6 100644 --- a/src/finn/custom_op/fpgadataflow/thresholding_batch.py +++ b/src/finn/custom_op/fpgadataflow/thresholding_batch.py @@ -211,6 +211,8 @@ def minimize_accumulator_width(self, model): threshold_tensor ).all(), "Thresholds can't be expressed with type %s" % str(tdt) self.set_nodeattr("weightDataType", tdt.name) + # Update QONNX DataType of tensor for consistency + model.set_tensor_datatype(self.onnx_node.input[1], tdt) return DataType[self.get_nodeattr("weightDataType")] def get_instream_width(self, ind=0): From 
f367a5aa3f2fc1bafe17ae5982057830964dffc0 Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 12:26:32 +0100 Subject: [PATCH 3/5] [minimize acc width]: apply InferDataTypes to propagate changes in each loop iteration --- .../fpgadataflow/minimize_accumulator_width.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py b/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py index bc020ca428..8d04d5b817 100644 --- a/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py +++ b/src/finn/transformation/fpgadataflow/minimize_accumulator_width.py @@ -28,6 +28,7 @@ from qonnx.custom_op.registry import getCustomOp from qonnx.transformation.base import Transformation +from qonnx.transformation.infer_datatypes import InferDataTypes from finn.util.fpgadataflow import is_fpgadataflow_node @@ -41,9 +42,15 @@ def __init__(self): super().__init__() def apply(self, model): - for node in model.graph.node: + for node_id in range(len(model.graph.node)): + # Since InferDataTypes potentially changes node attributes in each loop iteration, + # the for-loop cannot loop over a list of a snapshot of the graph's node protos + node = model.graph.node[node_id] if is_fpgadataflow_node(node) is True: inst = getCustomOp(node) if hasattr(inst, "minimize_accumulator_width"): inst.minimize_accumulator_width(model) + # Since this transformation is applied iteratively, we have to ensure that + # we propagate the new datatype to other layers + model = model.transform(InferDataTypes()) return (model, False) From 763fa48bbef716c1ff15cdb2423c073d2aa52aef Mon Sep 17 00:00:00 2001 From: mmrahorovic Date: Mon, 31 Jul 2023 17:57:57 +0100 Subject: [PATCH 4/5] [custom op]: set outputDataType in case of no activation --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py 
b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index f817751852..9a9c6714fe 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -190,6 +190,8 @@ def minimize_accumulator_width(self, model): adt = DataType[new_adt_name] # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) + if self.get_nodeattr("noActivation"): + self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")] From f52871dfe71df725ef85eeb66b6ff9ca7dff1d2d Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 4 Aug 2023 17:11:37 +0100 Subject: [PATCH 5/5] [Custom Op] Delete obsolete lines after merging with dev --- src/finn/custom_op/fpgadataflow/vectorvectoractivation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py index db31090f44..bd5bb75f1d 100644 --- a/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py +++ b/src/finn/custom_op/fpgadataflow/vectorvectoractivation.py @@ -184,8 +184,6 @@ def minimize_accumulator_width(self, model): adt = DataType[new_adt_name] # for no-activation nodes, output dt = acc dt self.set_nodeattr("outputDataType", adt.name) - if self.get_nodeattr("noActivation"): - self.set_nodeattr("outputDataType", adt.name) self.set_nodeattr("accDataType", adt.name) return DataType[self.get_nodeattr("accDataType")]