diff --git a/nncf/experimental/torch/fx/transformations.py b/nncf/experimental/torch/fx/transformations.py index c695220c981..3af906c5670 100644 --- a/nncf/experimental/torch/fx/transformations.py +++ b/nncf/experimental/torch/fx/transformations.py @@ -129,7 +129,7 @@ def module_insertion_transformation(model: torch.fx.GraphModule): user.replace_input_with(target_node, new_node) else: - prev_node = target_node.args[target_point.input_port_id] + prev_node = _get_node_by_input_port_id(target_node, target_point.input_port_id) _set_new_node_meta(new_node, [prev_node], module_to_insert, model) target_node.replace_input_with(prev_node, new_node) @@ -222,27 +222,25 @@ def constant_update_fn( :param updated_node_name: Name of the constant node after updating. Default is `nodename` + `_updated_constant`. """ graph = model.graph - node_name = updated_node_name if updated_node_name else node.name + "_updated_constant" - args = list(node.args) - # A bias node suppose to have constant on the second input port. - if args[input_port_id].op != "get_attr": + old_const = _get_node_by_input_port_id(node, input_port_id) + + if old_const.op != "get_attr": raise nncf.InternalError( - f"Constant on input port {input_port_id} for {node} is expected," - f" but node {args[input_port_id]} is present." + f"Constant on input port {input_port_id} for {node} is expected," f" but node {old_const} is present." ) + node_name = updated_node_name if updated_node_name else old_const.name + "_updated_constant" # Update metadata of the new constant node. - previous_const = args[input_port_id] - consumer_nodes = list(previous_const.users) + consumer_nodes = list(old_const.users) # This list of consumer nodes is topologically sorted # To ensure the updated node has the right order, # we insert constant node before the node placed at the highest order in topological order. sorted_consumer_nodes = [node for node in graph.nodes if node in consumer_nodes] with graph.inserting_before(sorted_consumer_nodes[0]): - new_constant = create_getattr_from_value(model, graph, node_name, value) + new_const = create_getattr_from_value(model, graph, node_name, value) - previous_const.replace_all_uses_with(new_constant, propagate_meta=True) + old_const.replace_all_uses_with(new_const, propagate_meta=True) graph.eliminate_dead_code() @@ -428,9 +426,7 @@ def insert_one_qdq(model: torch.fx.GraphModule, target_point: PTTargetPoint, qua dq_node = graph.call_function(dequantize_op, tuple(dq_inputs), {}) dq_node.meta["val"] = copy(meta_val) - args = list(target_node.args) - args[target_point.input_port_id] = dq_node - target_node.args = tuple(args) + target_node.replace_input_with(input_node, dq_node) else: raise nncf.InternalError(f"Unexpected target type: {target_point.target_type}") @@ -471,7 +467,21 @@ def get_input_node(target_point: PTTargetPoint, target_node: torch.fx.Node) -> t raise nncf.InternalError(f"Unexpected target type: {target_type}") if target_type == TargetType.OPERATOR_POST_HOOK: return target_node - return target_node.args[target_point.input_port_id] + + return _get_node_by_input_port_id(target_node, target_point.input_port_id) + + + +def _get_node_by_input_port_id(node: torch.fx.Node, input_port_id: int) -> torch.fx.Node: + """ + Retrieves the node connected to the given input port of the given node. + + :param node: The node whose input should be retrieved. + :param input_port_id: Id of the input port; for torch.ops.aten.cat.default nodes this indexes the list of inputs packed into args[0]. + :return: The input node connected to the given input port of the given node.
+ """ + if node.target == torch.ops.aten.cat.default: + return node.args[0][input_port_id] + return node.args[input_port_id] def get_ctx_manager(graph: torch.fx.Graph, target_point: PTTargetPoint) -> Callable: diff --git a/nncf/torch/quantization/ignored_patterns.py b/nncf/torch/quantization/ignored_patterns.py index e5bd1d93e16..e90d228ed17 100644 --- a/nncf/torch/quantization/ignored_patterns.py +++ b/nncf/torch/quantization/ignored_patterns.py @@ -55,7 +55,7 @@ def _add_softmax_reshape_matmul( # \ # \ # \ - # RESHAPE RESHAPE||TRANSPOSE||GATHER||SQUEEZE||CONCAT + # RESHAPE || TRANSPOSE RESHAPE||TRANSPOSE||GATHER||SQUEEZE||CONCAT # \ / # \ / # \ / @@ -66,7 +66,10 @@ def _add_softmax_reshape_matmul( branch_matmul_nodes = reshape_squeeze_metatypes + gather_metatypes + transpose_metatypes + concat_metatypes softmax = pattern.add_node(**{GraphPattern.LABEL_ATTR: "SOFTMAX", GraphPattern.METATYPE_ATTR: om.PTSoftmaxMetatype}) reshape = pattern.add_node( - **{GraphPattern.LABEL_ATTR: "RESHAPE", GraphPattern.METATYPE_ATTR: reshape_squeeze_metatypes} + **{ + GraphPattern.LABEL_ATTR: "RESHAPE", + GraphPattern.METATYPE_ATTR: reshape_squeeze_metatypes + transpose_metatypes, + } ) matmul = pattern.add_node(**{GraphPattern.LABEL_ATTR: "MATMUL", GraphPattern.METATYPE_ATTR: matmul_metatypes}) matmul_branch_nodes = pattern.add_node( diff --git a/tests/torch/data/reference_graphs/fx/post_quantization_compressed/yolo11n_sdpa_block.dot b/tests/torch/data/reference_graphs/fx/post_quantization_compressed/yolo11n_sdpa_block.dot new file mode 100644 index 00000000000..c349bc361b7 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/post_quantization_compressed/yolo11n_sdpa_block.dot @@ -0,0 +1,63 @@ +strict digraph { +"0 x" [id=0, type=input]; +"1 x_0_0_nncf_smooth_quant_0" [id=1, type=call_module]; +"2 quantize_per_tensor_default" [id=2, type=quantize_per_tensor]; +"3 dequantize_per_tensor_default" [id=3, type=dequantize_per_tensor]; +"4 scale_updated_constant0" [id=4, type=get_attr]; +"5 compressed_weight_updated_constant0" [id=5, type=get_attr]; +"6 mul_tensor" [id=6, type=mul]; +"7 zero_point_updated_constant0" [id=7, type=get_attr]; +"8 sub_tensor" [id=8, type=sub]; +"9 linear" [id=9, type=linear]; +"10 quantize_per_tensor_default_1" [id=10, type=quantize_per_tensor]; +"11 dequantize_per_tensor_default_1" [id=11, type=dequantize_per_tensor]; +"12 slice_1" [id=12, type=slice]; +"13 slice_2" [id=13, type=slice]; +"14 slice_3" [id=14, type=slice]; +"15 quantize_per_tensor_default_2" [id=15, type=quantize_per_tensor]; +"16 dequantize_per_tensor_default_2" [id=16, type=dequantize_per_tensor]; +"17 slice_4" [id=17, type=slice]; +"18 slice_5" [id=18, type=slice]; +"19 slice_6" [id=19, type=slice]; +"20 slice_7" [id=20, type=slice]; +"21 slice_8" [id=21, type=slice]; +"22 slice_9" [id=22, type=slice]; +"23 transpose" [id=23, type=transpose]; +"24 matmul" [id=24, type=matmul]; +"25 div_" [id=25, type=div_]; +"26 softmax" [id=26, type=softmax]; +"27 transpose_1" [id=27, type=transpose]; +"28 matmul_1" [id=28, type=matmul]; +"29 output" [id=29, type=output]; +"0 x" -> "1 x_0_0_nncf_smooth_quant_0" [label="(1, 2, 4)", style=solid]; +"1 x_0_0_nncf_smooth_quant_0" -> "2 quantize_per_tensor_default" [label="(1, 2, 4)", style=solid]; +"2 quantize_per_tensor_default" -> "3 dequantize_per_tensor_default" [label="(1, 2, 4)", style=solid]; +"3 dequantize_per_tensor_default" -> "9 linear" [label="(1, 2, 4)", style=solid]; +"4 scale_updated_constant0" -> "6 mul_tensor" [label="(12, 1)", style=solid]; +"5 
compressed_weight_updated_constant0" -> "6 mul_tensor" [label="(12, 4)", style=solid]; +"6 mul_tensor" -> "8 sub_tensor" [label="(12, 4)", style=solid]; +"7 zero_point_updated_constant0" -> "8 sub_tensor" [label="(12, 1)", style=solid]; +"8 sub_tensor" -> "9 linear" [label="(12, 4)", style=solid]; +"9 linear" -> "10 quantize_per_tensor_default_1" [label="(1, 2, 12)", style=solid]; +"9 linear" -> "15 quantize_per_tensor_default_2" [label="(1, 2, 12)", style=solid]; +"9 linear" -> "20 slice_7" [label="(1, 2, 12)", style=solid]; +"10 quantize_per_tensor_default_1" -> "11 dequantize_per_tensor_default_1" [label="(1, 2, 12)", style=solid]; +"11 dequantize_per_tensor_default_1" -> "12 slice_1" [label="(1, 2, 12)", style=solid]; +"12 slice_1" -> "13 slice_2" [label="(1, 2, 12)", style=solid]; +"13 slice_2" -> "14 slice_3" [label="(1, 2, 12)", style=solid]; +"14 slice_3" -> "24 matmul" [label="(1, 2, 4)", style=solid]; +"15 quantize_per_tensor_default_2" -> "16 dequantize_per_tensor_default_2" [label="(1, 2, 12)", style=solid]; +"16 dequantize_per_tensor_default_2" -> "17 slice_4" [label="(1, 2, 12)", style=solid]; +"17 slice_4" -> "18 slice_5" [label="(1, 2, 12)", style=solid]; +"18 slice_5" -> "19 slice_6" [label="(1, 2, 12)", style=solid]; +"19 slice_6" -> "23 transpose" [label="(1, 2, 4)", style=solid]; +"20 slice_7" -> "21 slice_8" [label="(1, 2, 12)", style=solid]; +"21 slice_8" -> "22 slice_9" [label="(1, 2, 12)", style=solid]; +"22 slice_9" -> "28 matmul_1" [label="(1, 2, 4)", style=solid]; +"23 transpose" -> "24 matmul" [label="(1, 4, 2)", style=solid]; +"24 matmul" -> "25 div_" [label="(1, 2, 2)", style=solid]; +"25 div_" -> "26 softmax" [label="(1, 2, 2)", style=solid]; +"26 softmax" -> "27 transpose_1" [label="(1, 2, 2)", style=solid]; +"27 transpose_1" -> "28 matmul_1" [label="(1, 2, 2)", style=solid]; +"28 matmul_1" -> "29 output" [label="(1, 2, 4)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/quantized/yolo11n_sdpa_block.dot b/tests/torch/data/reference_graphs/fx/quantized/yolo11n_sdpa_block.dot new file mode 100644 index 00000000000..07216701d90 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/quantized/yolo11n_sdpa_block.dot @@ -0,0 +1,65 @@ +strict digraph { +"0 x" [id=0, type=input]; +"1 x_0_0_nncf_smooth_quant_0" [id=1, type=call_module]; +"2 quantize_per_tensor_default" [id=2, type=quantize_per_tensor]; +"3 dequantize_per_tensor_default" [id=3, type=dequantize_per_tensor]; +"4 linear_scale_0" [id=4, type=get_attr]; +"5 linear_zero_point_0" [id=5, type=get_attr]; +"6 compressed_weight_updated_constant0" [id=6, type=get_attr]; +"7 quantize_per_channel_default" [id=7, type=quantize_per_channel]; +"8 dequantize_per_channel_default" [id=8, type=dequantize_per_channel]; +"9 linear" [id=9, type=linear]; +"10 quantize_per_tensor_default_1" [id=10, type=quantize_per_tensor]; +"11 dequantize_per_tensor_default_1" [id=11, type=dequantize_per_tensor]; +"12 slice_1" [id=12, type=slice]; +"13 slice_2" [id=13, type=slice]; +"14 slice_3" [id=14, type=slice]; +"15 quantize_per_tensor_default_2" [id=15, type=quantize_per_tensor]; +"16 dequantize_per_tensor_default_2" [id=16, type=dequantize_per_tensor]; +"17 slice_4" [id=17, type=slice]; +"18 slice_5" [id=18, type=slice]; +"19 slice_6" [id=19, type=slice]; +"20 slice_7" [id=20, type=slice]; +"21 slice_8" [id=21, type=slice]; +"22 slice_9" [id=22, type=slice]; +"23 transpose" [id=23, type=transpose]; +"24 matmul" [id=24, type=matmul]; +"25 div_" [id=25, type=div_]; +"26 softmax" [id=26, type=softmax]; +"27 
transpose_1" [id=27, type=transpose]; +"28 matmul_1" [id=28, type=matmul]; +"29 output" [id=29, type=output]; +"0 x" -> "1 x_0_0_nncf_smooth_quant_0" [label="(1, 2, 4)", style=solid]; +"1 x_0_0_nncf_smooth_quant_0" -> "2 quantize_per_tensor_default" [label="(1, 2, 4)", style=solid]; +"2 quantize_per_tensor_default" -> "3 dequantize_per_tensor_default" [label="(1, 2, 4)", style=solid]; +"3 dequantize_per_tensor_default" -> "9 linear" [label="(1, 2, 4)", style=solid]; +"4 linear_scale_0" -> "7 quantize_per_channel_default" [label="(12,)", style=solid]; +"4 linear_scale_0" -> "8 dequantize_per_channel_default" [label="(12,)", style=solid]; +"5 linear_zero_point_0" -> "7 quantize_per_channel_default" [label="(12,)", style=solid]; +"5 linear_zero_point_0" -> "8 dequantize_per_channel_default" [label="(12,)", style=solid]; +"6 compressed_weight_updated_constant0" -> "7 quantize_per_channel_default" [label="(12, 4)", style=solid]; +"7 quantize_per_channel_default" -> "8 dequantize_per_channel_default" [label="(12, 4)", style=solid]; +"8 dequantize_per_channel_default" -> "9 linear" [label="(12, 4)", style=solid]; +"9 linear" -> "10 quantize_per_tensor_default_1" [label="(1, 2, 12)", style=solid]; +"9 linear" -> "15 quantize_per_tensor_default_2" [label="(1, 2, 12)", style=solid]; +"9 linear" -> "20 slice_7" [label="(1, 2, 12)", style=solid]; +"10 quantize_per_tensor_default_1" -> "11 dequantize_per_tensor_default_1" [label="(1, 2, 12)", style=solid]; +"11 dequantize_per_tensor_default_1" -> "12 slice_1" [label="(1, 2, 12)", style=solid]; +"12 slice_1" -> "13 slice_2" [label="(1, 2, 12)", style=solid]; +"13 slice_2" -> "14 slice_3" [label="(1, 2, 12)", style=solid]; +"14 slice_3" -> "24 matmul" [label="(1, 2, 4)", style=solid]; +"15 quantize_per_tensor_default_2" -> "16 dequantize_per_tensor_default_2" [label="(1, 2, 12)", style=solid]; +"16 dequantize_per_tensor_default_2" -> "17 slice_4" [label="(1, 2, 12)", style=solid]; +"17 slice_4" -> "18 slice_5" [label="(1, 2, 12)", style=solid]; +"18 slice_5" -> "19 slice_6" [label="(1, 2, 12)", style=solid]; +"19 slice_6" -> "23 transpose" [label="(1, 2, 4)", style=solid]; +"20 slice_7" -> "21 slice_8" [label="(1, 2, 12)", style=solid]; +"21 slice_8" -> "22 slice_9" [label="(1, 2, 12)", style=solid]; +"22 slice_9" -> "28 matmul_1" [label="(1, 2, 4)", style=solid]; +"23 transpose" -> "24 matmul" [label="(1, 4, 2)", style=solid]; +"24 matmul" -> "25 div_" [label="(1, 2, 2)", style=solid]; +"25 div_" -> "26 softmax" [label="(1, 2, 2)", style=solid]; +"26 softmax" -> "27 transpose_1" [label="(1, 2, 2)", style=solid]; +"27 transpose_1" -> "28 matmul_1" [label="(1, 2, 2)", style=solid]; +"28 matmul_1" -> "29 output" [label="(1, 2, 4)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/reference_metatypes/yolo11n_sdpa_block.json b/tests/torch/data/reference_graphs/fx/reference_metatypes/yolo11n_sdpa_block.json new file mode 100644 index 00000000000..422ed6958a7 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/reference_metatypes/yolo11n_sdpa_block.json @@ -0,0 +1,21 @@ +{ + "kqv_weight": "PTConstNoopMetatype", + "x": "PTInputNoopMetatype", + "linear": "PTLinearMetatype", + "slice_1": "PTGatherMetatype", + "slice_2": "PTGatherMetatype", + "slice_3": "PTGatherMetatype", + "slice_4": "PTGatherMetatype", + "slice_5": "PTGatherMetatype", + "slice_6": "PTGatherMetatype", + "slice_7": "PTGatherMetatype", + "slice_8": "PTGatherMetatype", + "slice_9": "PTGatherMetatype", + "transpose": "PTTransposeMetatype", + "matmul": "PTMatMulMetatype", + 
"div_": "PTDivMetatype", + "softmax": "PTSoftmaxMetatype", + "transpose_1": "PTTransposeMetatype", + "matmul_1": "PTMatMulMetatype", + "output_1": "PTOutputNoopMetatype" +} \ No newline at end of file diff --git a/tests/torch/data/reference_graphs/fx/transformed/cat_constant_update.dot b/tests/torch/data/reference_graphs/fx/transformed/cat_constant_update.dot new file mode 100644 index 00000000000..00305825ee1 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/cat_constant_update.dot @@ -0,0 +1,38 @@ +strict digraph { +"0 conv_a_weight" [id=0, type=get_attr]; +"1 conv_a_bias" [id=1, type=get_attr]; +"2 conv_b_weight" [id=2, type=get_attr]; +"3 conv_b_bias" [id=3, type=get_attr]; +"4 conv_c_weight" [id=4, type=get_attr]; +"5 conv_c_bias" [id=5, type=get_attr]; +"6 bias" [id=6, type=get_attr]; +"7 x" [id=7, type=input]; +"8 conv2d" [id=8, type=conv2d]; +"9 conv2d_1" [id=9, type=conv2d]; +"10 add_" [id=10, type=add_]; +"11 add__1" [id=11, type=add_]; +"12 const_updated_constant0" [id=12, type=get_attr]; +"13 cat" [id=13, type=cat]; +"14 conv2d_2" [id=14, type=conv2d]; +"15 add" [id=15, type=add]; +"16 output_1" [id=16, type=output]; +"0 conv_a_weight" -> "8 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"1 conv_a_bias" -> "8 conv2d" [label="(3,)", style=solid]; +"2 conv_b_weight" -> "9 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"3 conv_b_bias" -> "9 conv2d_1" [label="(3,)", style=solid]; +"4 conv_c_weight" -> "14 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"5 conv_c_bias" -> "14 conv2d_2" [label="(3,)", style=solid]; +"6 bias" -> "10 add_" [label="(1,)", style=solid]; +"6 bias" -> "11 add__1" [label="(1,)", style=solid]; +"6 bias" -> "15 add" [label="(1,)", style=solid]; +"7 x" -> "8 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"8 conv2d" -> "9 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"8 conv2d" -> "10 add_" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d_1" -> "11 add__1" [label="(1, 3, 3, 3)", style=solid]; +"10 add_" -> "13 cat" [label="(1, 3, 3, 3)", style=solid]; +"11 add__1" -> "13 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 const_updated_constant0" -> "13 cat" [label="(1,)", style=solid]; +"13 cat" -> "14 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"14 conv2d_2" -> "15 add" [label="(1, 3, 3, 3)", style=solid]; +"15 add" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/transformed/constant_update.dot b/tests/torch/data/reference_graphs/fx/transformed/constant_update.dot index dde22a7ecba..3a11314e9b6 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/constant_update.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/constant_update.dot @@ -1,36 +1,38 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 x" [id=6, type=input]; -"7 conv2d" [id=7, type=conv2d]; -"8 conv2d_1" [id=8, type=conv2d]; -"9 add__updated_constant0" [id=9, type=get_attr]; -"10 add_" [id=10, type=add_]; -"11 add__1" [id=11, type=add_]; -"12 add" [id=12, type=add]; -"13 conv2d_2" [id=13, type=conv2d]; -"14 add_1" [id=14, type=add]; -"15 output_1" [id=15, type=output]; -"0 conv_a_weight" -> "7 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "7 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "8 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "8 
conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "13 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "13 conv2d_2" [label="(3,)", style=solid]; -"6 x" -> "7 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"7 conv2d" -> "8 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"7 conv2d" -> "10 add_" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d_1" -> "11 add__1" [label="(1, 3, 3, 3)", style=solid]; -"9 add__updated_constant0" -> "10 add_" [label="(1,)", style=solid]; -"9 add__updated_constant0" -> "11 add__1" [label="(1,)", style=solid]; -"9 add__updated_constant0" -> "14 add_1" [label="(1,)", style=solid]; -"10 add_" -> "12 add" [label="(1, 3, 3, 3)", style=solid]; -"11 add__1" -> "12 add" [label="(1, 3, 3, 3)", style=solid]; -"12 add" -> "13 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"13 conv2d_2" -> "14 add_1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_1" -> "15 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 x" [id=7, type=input]; +"8 conv2d" [id=8, type=conv2d]; +"9 conv2d_1" [id=9, type=conv2d]; +"10 bias_updated_constant0" [id=10, type=get_attr]; +"11 add_" [id=11, type=add_]; +"12 add__1" [id=12, type=add_]; +"13 cat" [id=13, type=cat]; +"14 conv2d_2" [id=14, type=conv2d]; +"15 add" [id=15, type=add]; +"16 output_1" [id=16, type=output]; +"0 const" -> "13 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "8 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "8 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "9 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "9 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "14 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "14 conv2d_2" [label="(3,)", style=solid]; +"7 x" -> "8 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"8 conv2d" -> "9 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"8 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; +"10 bias_updated_constant0" -> "11 add_" [label="(1,)", style=solid]; +"10 bias_updated_constant0" -> "12 add__1" [label="(1,)", style=solid]; +"10 bias_updated_constant0" -> "15 add" [label="(1,)", style=solid]; +"11 add_" -> "13 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "13 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 cat" -> "14 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"14 conv2d_2" -> "15 add" [label="(1, 3, 3, 3)", style=solid]; +"15 add" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/model_insertion.dot b/tests/torch/data/reference_graphs/fx/transformed/model_insertion.dot index 825f345d634..142b71f3caa 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/model_insertion.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/model_insertion.dot @@ -1,44 +1,50 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 TEST_MODULE_0" [id=8, type=call_module]; -"9 
TEST_MODULE_1" [id=9, type=call_module]; -"10 conv2d" [id=10, type=conv2d]; -"11 TEST_MODULE_3" [id=11, type=call_module]; -"12 TEST_MODULE_2" [id=12, type=call_module]; -"13 conv2d_1" [id=13, type=conv2d]; -"14 add_" [id=14, type=add_]; -"15 add__1" [id=15, type=add_]; -"16 add" [id=16, type=add]; -"17 conv2d_2" [id=17, type=conv2d]; -"18 add_1" [id=18, type=add]; -"19 output_1" [id=19, type=output]; -"0 conv_a_weight" -> "9 TEST_MODULE_1" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "12 TEST_MODULE_2" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "17 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "14 add_" [label="(1,)", style=solid]; -"6 bias" -> "15 add__1" [label="(1,)", style=solid]; -"6 bias" -> "18 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 TEST_MODULE_0" [label="(1, 3, 3, 3)", style=solid]; -"8 TEST_MODULE_0" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"9 TEST_MODULE_1" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"10 conv2d" -> "11 TEST_MODULE_3" [label="(1, 3, 3, 3)", style=solid]; -"11 TEST_MODULE_3" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"11 TEST_MODULE_3" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; -"12 TEST_MODULE_2" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add__1" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add" -> "17 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"17 conv2d_2" -> "18 add_1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_1" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 TEST_MODULE_0" [id=9, type=call_module]; +"10 TEST_MODULE_1" [id=10, type=call_module]; +"11 conv2d" [id=11, type=conv2d]; +"12 TEST_MODULE_3" [id=12, type=call_module]; +"13 TEST_MODULE_2" [id=13, type=call_module]; +"14 conv2d_1" [id=14, type=conv2d]; +"15 add_" [id=15, type=add_]; +"16 add__1" [id=16, type=add_]; +"17 TEST_MODULE_4" [id=17, type=call_module]; +"18 TEST_MODULE_5" [id=18, type=call_module]; +"19 cat" [id=19, type=cat]; +"20 conv2d_2" [id=20, type=conv2d]; +"21 add" [id=21, type=add]; +"22 output_1" [id=22, type=output]; +"0 const" -> "18 TEST_MODULE_5" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "10 TEST_MODULE_1" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "11 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "13 TEST_MODULE_2" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "20 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "20 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "15 add_" [label="(1,)", style=solid]; +"7 bias" -> "16 add__1" [label="(1,)", style=solid]; +"7 bias" -> "21 add" [label="(1,)", style=solid]; +"8 x" -> "9 TEST_MODULE_0" [label="(1, 3, 3, 3)", style=solid]; +"9 TEST_MODULE_0" -> "11 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"10 TEST_MODULE_1" -> "11 
conv2d" [label="(3, 3, 1, 1)", style=solid]; +"11 conv2d" -> "12 TEST_MODULE_3" [label="(1, 3, 3, 3)", style=solid]; +"12 TEST_MODULE_3" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"12 TEST_MODULE_3" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; +"13 TEST_MODULE_2" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; +"15 add_" -> "19 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "17 TEST_MODULE_4" [label="(1, 3, 3, 3)", style=solid]; +"17 TEST_MODULE_4" -> "19 cat" [label="(1, 3, 3, 3)", style=solid]; +"18 TEST_MODULE_5" -> "19 cat" [label="(1, 3, 3, 3)", style=solid]; +"19 cat" -> "20 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"20 conv2d_2" -> "21 add" [label="(1, 3, 3, 3)", style=solid]; +"21 add" -> "22 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/model_insertion_leaf.dot b/tests/torch/data/reference_graphs/fx/transformed/model_insertion_leaf.dot index d4eccf226a0..349dd35ea11 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/model_insertion_leaf.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/model_insertion_leaf.dot @@ -1,44 +1,50 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 TEST_MODULE_0" [id=8, type=call_module]; -"9 TEST_MODULE_1" [id=9, type=call_module]; -"10 conv2d" [id=10, type=conv2d]; -"11 TEST_MODULE_3" [id=11, type=call_module]; -"12 TEST_MODULE_2" [id=12, type=call_module]; -"13 conv2d_1" [id=13, type=conv2d]; -"14 add_" [id=14, type=add_]; -"15 add__1" [id=15, type=add_]; -"16 add" [id=16, type=add]; -"17 conv2d_2" [id=17, type=conv2d]; -"18 add_1" [id=18, type=add]; -"19 output_1" [id=19, type=output]; -"0 conv_a_weight" -> "9 TEST_MODULE_1" [label="(3, 3, 1, 1)", style=solid]; -"0 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "12 TEST_MODULE_2" [label="(3, 3, 1, 1)", style=solid]; -"2 conv_b_weight" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "17 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "14 add_" [label="(1,)", style=solid]; -"6 bias" -> "15 add__1" [label="(1,)", style=solid]; -"6 bias" -> "18 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 TEST_MODULE_0" [label="(1, 3, 3, 3)", style=solid]; -"7 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "11 TEST_MODULE_3" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; -"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add__1" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add" -> "17 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"17 conv2d_2" -> "18 add_1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_1" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, 
type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 TEST_MODULE_0" [id=9, type=call_module]; +"10 TEST_MODULE_1" [id=10, type=call_module]; +"11 conv2d" [id=11, type=conv2d]; +"12 TEST_MODULE_3" [id=12, type=call_module]; +"13 TEST_MODULE_2" [id=13, type=call_module]; +"14 conv2d_1" [id=14, type=conv2d]; +"15 add_" [id=15, type=add_]; +"16 add__1" [id=16, type=add_]; +"17 TEST_MODULE_4" [id=17, type=call_module]; +"18 TEST_MODULE_5" [id=18, type=call_module]; +"19 cat" [id=19, type=cat]; +"20 conv2d_2" [id=20, type=conv2d]; +"21 add" [id=21, type=add]; +"22 output_1" [id=22, type=output]; +"0 const" -> "18 TEST_MODULE_5" [label="(1, 3, 3, 3)", style=solid]; +"0 const" -> "19 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "10 TEST_MODULE_1" [label="(3, 3, 1, 1)", style=solid]; +"1 conv_a_weight" -> "11 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "11 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "13 TEST_MODULE_2" [label="(3, 3, 1, 1)", style=solid]; +"3 conv_b_weight" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "20 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "20 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "15 add_" [label="(1,)", style=solid]; +"7 bias" -> "16 add__1" [label="(1,)", style=solid]; +"7 bias" -> "21 add" [label="(1,)", style=solid]; +"8 x" -> "9 TEST_MODULE_0" [label="(1, 3, 3, 3)", style=solid]; +"8 x" -> "11 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "12 TEST_MODULE_3" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; +"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; +"15 add_" -> "19 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "17 TEST_MODULE_4" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "19 cat" [label="(1, 3, 3, 3)", style=solid]; +"19 cat" -> "20 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"20 conv2d_2" -> "21 add" [label="(1, 3, 3, 3)", style=solid]; +"21 add" -> "22 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_cat_5_2.dot b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_cat_5_2.dot new file mode 100644 index 00000000000..5487b2dd213 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_cat_5_2.dot @@ -0,0 +1,41 @@ +strict digraph { +"0 const" [id=0, type=get_attr]; +"1 const_cloned" [id=1, type=clone]; +"2 conv_a_weight" [id=2, type=get_attr]; +"3 conv_a_bias" [id=3, type=get_attr]; +"4 conv_b_weight" [id=4, type=get_attr]; +"5 conv_b_bias" [id=5, type=get_attr]; +"6 conv_c_weight" [id=6, type=get_attr]; +"7 conv_c_bias" [id=7, type=get_attr]; +"8 bias" [id=8, type=get_attr]; +"9 x" [id=9, type=input]; +"10 conv2d" [id=10, type=conv2d]; +"11 conv2d_1" [id=11, type=conv2d]; +"12 add_" [id=12, type=add_]; +"13 add__1" [id=13, type=add_]; +"14 cat" [id=14, type=cat]; +"15 conv2d_2" [id=15, type=conv2d]; +"16 add" [id=16, type=add]; +"17 output_1" [id=17, type=output]; +"0 const" -> "1 const_cloned" [label="(1, 3, 3, 3)", style=solid]; +"0 const" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 
const_cloned" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"2 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"3 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; +"4 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"5 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; +"6 conv_c_weight" -> "15 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"7 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; +"8 bias" -> "12 add_" [label="(1,)", style=solid]; +"8 bias" -> "13 add__1" [label="(1,)", style=solid]; +"8 bias" -> "16 add" [label="(1,)", style=solid]; +"9 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; +"12 add_" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 add__1" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 cat" -> "15 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"15 conv2d_2" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; +"16 add" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_cat_6_1.dot b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_cat_6_1.dot new file mode 100644 index 00000000000..8f8a88a2c5b --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_cat_6_1.dot @@ -0,0 +1,41 @@ +strict digraph { +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_1" [id=10, type=conv2d]; +"11 add_" [id=11, type=add_]; +"12 add__1" [id=12, type=add_]; +"13 add__1_cloned" [id=13, type=clone]; +"14 cat" [id=14, type=cat]; +"15 conv2d_2" [id=15, type=conv2d]; +"16 add" [id=16, type=add]; +"17 output_1" [id=17, type=output]; +"0 const" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "15 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "11 add_" [label="(1,)", style=solid]; +"7 bias" -> "12 add__1" [label="(1,)", style=solid]; +"7 bias" -> "16 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; +"11 add_" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "13 add__1_cloned" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 add__1_cloned" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"14 cat" -> "15 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"15 conv2d_2" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; +"16 add" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git 
a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_1_5_1.dot b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_1_5_1.dot index 8d641d46d41..c6beba2eb4f 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_1_5_1.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_1_5_1.dot @@ -1,39 +1,41 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_weight_cloned" [id=3, type=clone]; -"4 conv_b_bias" [id=4, type=get_attr]; -"5 conv_c_weight" [id=5, type=get_attr]; -"6 conv_c_bias" [id=6, type=get_attr]; -"7 bias" [id=7, type=get_attr]; -"8 x" [id=8, type=input]; -"9 conv2d" [id=9, type=conv2d]; -"10 conv2d_1" [id=10, type=conv2d]; -"11 add_" [id=11, type=add_]; -"12 add__1" [id=12, type=add_]; -"13 add" [id=13, type=add]; -"14 conv2d_2" [id=14, type=conv2d]; -"15 add_1" [id=15, type=add]; -"16 output_1" [id=16, type=output]; -"0 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "3 conv_b_weight_cloned" [label="(3, 3, 1, 1)", style=solid]; -"2 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_weight_cloned" -> "16 output_1" [label="(3, 3, 1, 1)", style=solid]; -"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; -"5 conv_c_weight" -> "14 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"6 conv_c_bias" -> "14 conv2d_2" [label="(3,)", style=solid]; -"7 bias" -> "11 add_" [label="(1,)", style=solid]; -"7 bias" -> "12 add__1" [label="(1,)", style=solid]; -"7 bias" -> "15 add_1" [label="(1,)", style=solid]; -"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; -"11 add_" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"12 add__1" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add" -> "14 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"14 conv2d_2" -> "15 add_1" [label="(1, 3, 3, 3)", style=solid]; -"15 add_1" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_weight_cloned" [id=4, type=clone]; +"5 conv_b_bias" [id=5, type=get_attr]; +"6 conv_c_weight" [id=6, type=get_attr]; +"7 conv_c_bias" [id=7, type=get_attr]; +"8 bias" [id=8, type=get_attr]; +"9 x" [id=9, type=input]; +"10 conv2d" [id=10, type=conv2d]; +"11 conv2d_1" [id=11, type=conv2d]; +"12 add_" [id=12, type=add_]; +"13 add__1" [id=13, type=add_]; +"14 cat" [id=14, type=cat]; +"15 conv2d_2" [id=15, type=conv2d]; +"16 add" [id=16, type=add]; +"17 output_1" [id=17, type=output]; +"0 const" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "4 conv_b_weight_cloned" [label="(3, 3, 1, 1)", style=solid]; +"3 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_weight_cloned" -> "17 output_1" [label="(3, 3, 1, 1)", style=solid]; +"5 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; +"6 conv_c_weight" -> "15 conv2d_2" [label="(3, 9, 1, 1)", 
style=solid]; +"7 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; +"8 bias" -> "12 add_" [label="(1,)", style=solid]; +"8 bias" -> "13 add__1" [label="(1,)", style=solid]; +"8 bias" -> "16 add" [label="(1,)", style=solid]; +"9 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; +"12 add_" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 add__1" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 cat" -> "15 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"15 conv2d_2" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; +"16 add" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_0.dot b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_0.dot index 3a35ddf39cf..54c024ac085 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_0.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_0.dot @@ -1,39 +1,41 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 x_cloned" [id=8, type=clone]; -"9 conv2d" [id=9, type=conv2d]; -"10 conv2d_1" [id=10, type=conv2d]; -"11 add_" [id=11, type=add_]; -"12 add__1" [id=12, type=add_]; -"13 add" [id=13, type=add]; -"14 conv2d_2" [id=14, type=conv2d]; -"15 add_1" [id=15, type=add]; -"16 output_1" [id=16, type=output]; -"0 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "14 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "14 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "11 add_" [label="(1,)", style=solid]; -"6 bias" -> "12 add__1" [label="(1,)", style=solid]; -"6 bias" -> "15 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 x_cloned" [label="(1, 3, 3, 3)", style=solid]; -"7 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 x_cloned" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; -"11 add_" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"12 add__1" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add" -> "14 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"14 conv2d_2" -> "15 add_1" [label="(1, 3, 3, 3)", style=solid]; -"15 add_1" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 x_cloned" [id=9, type=clone]; +"10 conv2d" [id=10, type=conv2d]; +"11 conv2d_1" [id=11, type=conv2d]; +"12 add_" 
[id=12, type=add_]; +"13 add__1" [id=13, type=add_]; +"14 cat" [id=14, type=cat]; +"15 conv2d_2" [id=15, type=conv2d]; +"16 add" [id=16, type=add]; +"17 output_1" [id=17, type=output]; +"0 const" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "15 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "12 add_" [label="(1,)", style=solid]; +"7 bias" -> "13 add__1" [label="(1,)", style=solid]; +"7 bias" -> "16 add" [label="(1,)", style=solid]; +"8 x" -> "9 x_cloned" [label="(1, 3, 3, 3)", style=solid]; +"8 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 x_cloned" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; +"12 add_" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 add__1" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 cat" -> "15 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"15 conv2d_2" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; +"16 add" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_1.dot b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_1.dot index 290db19728b..bebe10601ad 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_1.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_6_1.dot @@ -1,39 +1,41 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_weight_cloned" [id=1, type=clone]; -"2 conv_a_bias" [id=2, type=get_attr]; -"3 conv_b_weight" [id=3, type=get_attr]; -"4 conv_b_bias" [id=4, type=get_attr]; -"5 conv_c_weight" [id=5, type=get_attr]; -"6 conv_c_bias" [id=6, type=get_attr]; -"7 bias" [id=7, type=get_attr]; -"8 x" [id=8, type=input]; -"9 conv2d" [id=9, type=conv2d]; -"10 conv2d_1" [id=10, type=conv2d]; -"11 add_" [id=11, type=add_]; -"12 add__1" [id=12, type=add_]; -"13 add" [id=13, type=add]; -"14 conv2d_2" [id=14, type=conv2d]; -"15 add_1" [id=15, type=add]; -"16 output_1" [id=16, type=output]; -"0 conv_a_weight" -> "1 conv_a_weight_cloned" [label="(3, 3, 1, 1)", style=solid]; -"0 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_weight_cloned" -> "16 output_1" [label="(3, 3, 1, 1)", style=solid]; -"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; -"3 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; -"5 conv_c_weight" -> "14 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"6 conv_c_bias" -> "14 conv2d_2" [label="(3,)", style=solid]; -"7 bias" -> "11 add_" [label="(1,)", style=solid]; -"7 bias" -> "12 add__1" [label="(1,)", style=solid]; -"7 bias" -> "15 add_1" [label="(1,)", style=solid]; -"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; -"11 add_" -> "13 add" 
[label="(1, 3, 3, 3)", style=solid]; -"12 add__1" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add" -> "14 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"14 conv2d_2" -> "15 add_1" [label="(1, 3, 3, 3)", style=solid]; -"15 add_1" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_weight_cloned" [id=2, type=clone]; +"3 conv_a_bias" [id=3, type=get_attr]; +"4 conv_b_weight" [id=4, type=get_attr]; +"5 conv_b_bias" [id=5, type=get_attr]; +"6 conv_c_weight" [id=6, type=get_attr]; +"7 conv_c_bias" [id=7, type=get_attr]; +"8 bias" [id=8, type=get_attr]; +"9 x" [id=9, type=input]; +"10 conv2d" [id=10, type=conv2d]; +"11 conv2d_1" [id=11, type=conv2d]; +"12 add_" [id=12, type=add_]; +"13 add__1" [id=13, type=add_]; +"14 cat" [id=14, type=cat]; +"15 conv2d_2" [id=15, type=conv2d]; +"16 add" [id=16, type=add]; +"17 output_1" [id=17, type=output]; +"0 const" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "2 conv_a_weight_cloned" [label="(3, 3, 1, 1)", style=solid]; +"1 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_weight_cloned" -> "17 output_1" [label="(3, 3, 1, 1)", style=solid]; +"3 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; +"4 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"5 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; +"6 conv_c_weight" -> "15 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"7 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; +"8 bias" -> "12 add_" [label="(1,)", style=solid]; +"8 bias" -> "13 add__1" [label="(1,)", style=solid]; +"8 bias" -> "16 add" [label="(1,)", style=solid]; +"9 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; +"12 add_" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 add__1" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 cat" -> "15 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"15 conv2d_2" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; +"16 add" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_7_None.dot b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_7_None.dot index ff9313f1acc..92a8d41d58e 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_7_None.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/output_insertion_conv2d_7_None.dot @@ -1,39 +1,41 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d" [id=8, type=conv2d]; -"9 conv2d_cloned" [id=9, type=clone]; -"10 conv2d_1" [id=10, type=conv2d]; -"11 add_" [id=11, type=add_]; -"12 add__1" [id=12, type=add_]; -"13 add" [id=13, type=add]; -"14 conv2d_2" [id=14, type=conv2d]; -"15 add_1" [id=15, type=add]; -"16 output_1" [id=16, type=output]; -"0 conv_a_weight" -> "8 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "8 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", 
style=solid]; -"3 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "14 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "14 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "11 add_" [label="(1,)", style=solid]; -"6 bias" -> "12 add__1" [label="(1,)", style=solid]; -"6 bias" -> "15 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "9 conv2d_cloned" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d_cloned" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; -"11 add_" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"12 add__1" -> "13 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add" -> "14 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"14 conv2d_2" -> "15 add_1" [label="(1, 3, 3, 3)", style=solid]; -"15 add_1" -> "16 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_cloned" [id=10, type=clone]; +"11 conv2d_1" [id=11, type=conv2d]; +"12 add_" [id=12, type=add_]; +"13 add__1" [id=13, type=add_]; +"14 cat" [id=14, type=cat]; +"15 conv2d_2" [id=15, type=conv2d]; +"16 add" [id=16, type=add]; +"17 output_1" [id=17, type=output]; +"0 const" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "15 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "12 add_" [label="(1,)", style=solid]; +"7 bias" -> "13 add__1" [label="(1,)", style=solid]; +"7 bias" -> "16 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 conv2d_cloned" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_cloned" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; +"12 add_" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 add__1" -> "14 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 cat" -> "15 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"15 conv2d_2" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; +"16 add" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_5_2_per_channel.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_5_2_per_channel.dot new file mode 100644 index 00000000000..fb6a2f6716c --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_5_2_per_channel.dot @@ -0,0 +1,48 @@ +strict digraph { +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, 
type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_1" [id=10, type=conv2d]; +"11 add_" [id=11, type=add_]; +"12 add__1" [id=12, type=add_]; +"13 cat_scale_0" [id=13, type=get_attr]; +"14 cat_zero_point_0" [id=14, type=get_attr]; +"15 quantize_per_channel_default" [id=15, type=quantize_per_channel]; +"16 dequantize_per_channel_default" [id=16, type=dequantize_per_channel]; +"17 cat" [id=17, type=cat]; +"18 conv2d_2" [id=18, type=conv2d]; +"19 add" [id=19, type=add]; +"20 output_1" [id=20, type=output]; +"0 const" -> "15 quantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "18 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "11 add_" [label="(1,)", style=solid]; +"7 bias" -> "12 add__1" [label="(1,)", style=solid]; +"7 bias" -> "19 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; +"11 add_" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 cat_scale_0" -> "15 quantize_per_channel_default" [label="(1,)", style=solid]; +"13 cat_scale_0" -> "16 dequantize_per_channel_default" [label="(1,)", style=solid]; +"14 cat_zero_point_0" -> "15 quantize_per_channel_default" [label="(1,)", style=solid]; +"14 cat_zero_point_0" -> "16 dequantize_per_channel_default" [label="(1,)", style=solid]; +"15 quantize_per_channel_default" -> "16 dequantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"16 dequantize_per_channel_default" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 cat" -> "18 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"18 conv2d_2" -> "19 add" [label="(1, 3, 3, 3)", style=solid]; +"19 add" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_5_2_per_tensor.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_5_2_per_tensor.dot new file mode 100644 index 00000000000..58aa60d5f44 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_5_2_per_tensor.dot @@ -0,0 +1,42 @@ +strict digraph { +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_1" [id=10, type=conv2d]; +"11 add_" [id=11, type=add_]; +"12 add__1" [id=12, type=add_]; +"13 quantize_per_tensor_default" [id=13, type=quantize_per_tensor]; +"14 dequantize_per_tensor_default" [id=14, type=dequantize_per_tensor]; +"15 cat" [id=15, type=cat]; +"16 conv2d_2" [id=16, type=conv2d]; 
+"17 add" [id=17, type=add]; +"18 output_1" [id=18, type=output]; +"0 const" -> "13 quantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "16 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "16 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "11 add_" [label="(1,)", style=solid]; +"7 bias" -> "12 add__1" [label="(1,)", style=solid]; +"7 bias" -> "17 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; +"11 add_" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"13 quantize_per_tensor_default" -> "14 dequantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"14 dequantize_per_tensor_default" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"15 cat" -> "16 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"16 conv2d_2" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; +"17 add" -> "18 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_6_1_per_channel.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_6_1_per_channel.dot new file mode 100644 index 00000000000..a8226a3f505 --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_6_1_per_channel.dot @@ -0,0 +1,48 @@ +strict digraph { +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_1" [id=10, type=conv2d]; +"11 add_" [id=11, type=add_]; +"12 add__1" [id=12, type=add_]; +"13 cat_scale_0" [id=13, type=get_attr]; +"14 cat_zero_point_0" [id=14, type=get_attr]; +"15 quantize_per_channel_default" [id=15, type=quantize_per_channel]; +"16 dequantize_per_channel_default" [id=16, type=dequantize_per_channel]; +"17 cat" [id=17, type=cat]; +"18 conv2d_2" [id=18, type=conv2d]; +"19 add" [id=19, type=add]; +"20 output_1" [id=20, type=output]; +"0 const" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "18 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "11 add_" [label="(1,)", style=solid]; +"7 bias" -> "12 add__1" [label="(1,)", style=solid]; +"7 bias" -> "19 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", 
style=solid]; +"11 add_" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "15 quantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"13 cat_scale_0" -> "15 quantize_per_channel_default" [label="(1,)", style=solid]; +"13 cat_scale_0" -> "16 dequantize_per_channel_default" [label="(1,)", style=solid]; +"14 cat_zero_point_0" -> "15 quantize_per_channel_default" [label="(1,)", style=solid]; +"14 cat_zero_point_0" -> "16 dequantize_per_channel_default" [label="(1,)", style=solid]; +"15 quantize_per_channel_default" -> "16 dequantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"16 dequantize_per_channel_default" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 cat" -> "18 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"18 conv2d_2" -> "19 add" [label="(1, 3, 3, 3)", style=solid]; +"19 add" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_6_1_per_tensor.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_6_1_per_tensor.dot new file mode 100644 index 00000000000..3739095a9ff --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_cat_6_1_per_tensor.dot @@ -0,0 +1,42 @@ +strict digraph { +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_1" [id=10, type=conv2d]; +"11 add_" [id=11, type=add_]; +"12 add__1" [id=12, type=add_]; +"13 quantize_per_tensor_default" [id=13, type=quantize_per_tensor]; +"14 dequantize_per_tensor_default" [id=14, type=dequantize_per_tensor]; +"15 cat" [id=15, type=cat]; +"16 conv2d_2" [id=16, type=conv2d]; +"17 add" [id=17, type=add]; +"18 output_1" [id=18, type=output]; +"0 const" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "10 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "10 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "16 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "16 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "11 add_" [label="(1,)", style=solid]; +"7 bias" -> "12 add__1" [label="(1,)", style=solid]; +"7 bias" -> "17 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "11 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_1" -> "12 add__1" [label="(1, 3, 3, 3)", style=solid]; +"11 add_" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"12 add__1" -> "13 quantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"13 quantize_per_tensor_default" -> "14 dequantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"14 dequantize_per_tensor_default" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"15 cat" -> "16 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"16 conv2d_2" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; +"17 add" -> "18 output_1" [label="(1, 3, 3, 3)", style=solid]; +} diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_channel.dot 
b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_channel.dot index 1843f556984..539b9aba7ba 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_channel.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_channel.dot @@ -1,46 +1,48 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d" [id=8, type=conv2d]; -"9 conv2d_1_scale_0" [id=9, type=get_attr]; -"10 conv2d_1_zero_point_0" [id=10, type=get_attr]; -"11 quantize_per_channel_default" [id=11, type=quantize_per_channel]; -"12 dequantize_per_channel_default" [id=12, type=dequantize_per_channel]; -"13 conv2d_1" [id=13, type=conv2d]; -"14 add_" [id=14, type=add_]; -"15 add__1" [id=15, type=add_]; -"16 add" [id=16, type=add]; -"17 conv2d_2" [id=17, type=conv2d]; -"18 add_1" [id=18, type=add]; -"19 output_1" [id=19, type=output]; -"0 conv_a_weight" -> "8 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "8 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "11 quantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "17 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "14 add_" [label="(1,)", style=solid]; -"6 bias" -> "15 add__1" [label="(1,)", style=solid]; -"6 bias" -> "18 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; -"9 conv2d_1_scale_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_1_scale_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; -"10 conv2d_1_zero_point_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; -"10 conv2d_1_zero_point_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; -"11 quantize_per_channel_default" -> "12 dequantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; -"12 dequantize_per_channel_default" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add__1" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add" -> "17 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"17 conv2d_2" -> "18 add_1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_1" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 conv2d_1_scale_0" [id=10, type=get_attr]; +"11 conv2d_1_zero_point_0" [id=11, type=get_attr]; +"12 quantize_per_channel_default" [id=12, type=quantize_per_channel]; +"13 dequantize_per_channel_default" [id=13, type=dequantize_per_channel]; +"14 conv2d_1" [id=14, type=conv2d]; +"15 
add_" [id=15, type=add_]; +"16 add__1" [id=16, type=add_]; +"17 cat" [id=17, type=cat]; +"18 conv2d_2" [id=18, type=conv2d]; +"19 add" [id=19, type=add]; +"20 output_1" [id=20, type=output]; +"0 const" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "12 quantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "18 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "15 add_" [label="(1,)", style=solid]; +"7 bias" -> "16 add__1" [label="(1,)", style=solid]; +"7 bias" -> "19 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 conv2d_1_scale_0" -> "12 quantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_1_scale_0" -> "13 dequantize_per_channel_default" [label="(1,)", style=solid]; +"11 conv2d_1_zero_point_0" -> "12 quantize_per_channel_default" [label="(1,)", style=solid]; +"11 conv2d_1_zero_point_0" -> "13 dequantize_per_channel_default" [label="(1,)", style=solid]; +"12 quantize_per_channel_default" -> "13 dequantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; +"13 dequantize_per_channel_default" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; +"15 add_" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 cat" -> "18 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"18 conv2d_2" -> "19 add" [label="(1, 3, 3, 3)", style=solid]; +"19 add" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_tensor.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_tensor.dot index 1d8a5e629c0..5a55809fbfd 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_tensor.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_1_5_1_per_tensor.dot @@ -1,40 +1,42 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d" [id=8, type=conv2d]; -"9 quantize_per_tensor_default" [id=9, type=quantize_per_tensor]; -"10 dequantize_per_tensor_default" [id=10, type=dequantize_per_tensor]; -"11 conv2d_1" [id=11, type=conv2d]; -"12 add_" [id=12, type=add_]; -"13 add__1" [id=13, type=add_]; -"14 add" [id=14, type=add]; -"15 conv2d_2" [id=15, type=conv2d]; -"16 add_1" [id=16, type=add]; -"17 output_1" [id=17, type=output]; -"0 conv_a_weight" -> "8 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "8 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "9 quantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "15 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "15 conv2d_2" [label="(3,)", 
style=solid]; -"6 bias" -> "12 add_" [label="(1,)", style=solid]; -"6 bias" -> "13 add__1" [label="(1,)", style=solid]; -"6 bias" -> "16 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; -"9 quantize_per_tensor_default" -> "10 dequantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; -"10 dequantize_per_tensor_default" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; -"12 add_" -> "14 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add__1" -> "14 add" [label="(1, 3, 3, 3)", style=solid]; -"14 add" -> "15 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"15 conv2d_2" -> "16 add_1" [label="(1, 3, 3, 3)", style=solid]; -"16 add_1" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 quantize_per_tensor_default" [id=10, type=quantize_per_tensor]; +"11 dequantize_per_tensor_default" [id=11, type=dequantize_per_tensor]; +"12 conv2d_1" [id=12, type=conv2d]; +"13 add_" [id=13, type=add_]; +"14 add__1" [id=14, type=add_]; +"15 cat" [id=15, type=cat]; +"16 conv2d_2" [id=16, type=conv2d]; +"17 add" [id=17, type=add]; +"18 output_1" [id=18, type=output]; +"0 const" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "10 quantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "12 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "16 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "16 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "13 add_" [label="(1,)", style=solid]; +"7 bias" -> "14 add__1" [label="(1,)", style=solid]; +"7 bias" -> "17 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "12 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "13 add_" [label="(1, 3, 3, 3)", style=solid]; +"10 quantize_per_tensor_default" -> "11 dequantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; +"11 dequantize_per_tensor_default" -> "12 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"12 conv2d_1" -> "14 add__1" [label="(1, 3, 3, 3)", style=solid]; +"13 add_" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 add__1" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"15 cat" -> "16 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"16 conv2d_2" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; +"17 add" -> "18 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_channel.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_channel.dot index 50e8261f594..422540fb1e9 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_channel.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_channel.dot @@ -1,46 +1,48 @@ strict digraph { -"0 
conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d_scale_0" [id=8, type=get_attr]; -"9 conv2d_zero_point_0" [id=9, type=get_attr]; -"10 quantize_per_channel_default" [id=10, type=quantize_per_channel]; -"11 dequantize_per_channel_default" [id=11, type=dequantize_per_channel]; -"12 conv2d" [id=12, type=conv2d]; -"13 conv2d_1" [id=13, type=conv2d]; -"14 add_" [id=14, type=add_]; -"15 add__1" [id=15, type=add_]; -"16 add" [id=16, type=add]; -"17 conv2d_2" [id=17, type=conv2d]; -"18 add_1" [id=18, type=add]; -"19 output_1" [id=19, type=output]; -"0 conv_a_weight" -> "12 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "12 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "17 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "14 add_" [label="(1,)", style=solid]; -"6 bias" -> "15 add__1" [label="(1,)", style=solid]; -"6 bias" -> "18 add_1" [label="(1,)", style=solid]; -"7 x" -> "10 quantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d_scale_0" -> "10 quantize_per_channel_default" [label="(1,)", style=solid]; -"8 conv2d_scale_0" -> "11 dequantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "10 quantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "11 dequantize_per_channel_default" [label="(1,)", style=solid]; -"10 quantize_per_channel_default" -> "11 dequantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; -"11 dequantize_per_channel_default" -> "12 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"12 conv2d" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"12 conv2d" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; -"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add__1" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add" -> "17 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"17 conv2d_2" -> "18 add_1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_1" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d_scale_0" [id=9, type=get_attr]; +"10 conv2d_zero_point_0" [id=10, type=get_attr]; +"11 quantize_per_channel_default" [id=11, type=quantize_per_channel]; +"12 dequantize_per_channel_default" [id=12, type=dequantize_per_channel]; +"13 conv2d" [id=13, type=conv2d]; +"14 conv2d_1" [id=14, type=conv2d]; +"15 add_" [id=15, type=add_]; +"16 add__1" [id=16, type=add_]; +"17 cat" [id=17, type=cat]; +"18 conv2d_2" [id=18, type=conv2d]; +"19 add" [id=19, type=add]; +"20 output_1" [id=20, type=output]; +"0 const" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "13 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "13 conv2d" [label="(3,)", 
style=solid]; +"3 conv_b_weight" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "18 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "15 add_" [label="(1,)", style=solid]; +"7 bias" -> "16 add__1" [label="(1,)", style=solid]; +"7 bias" -> "19 add" [label="(1,)", style=solid]; +"8 x" -> "11 quantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d_scale_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; +"9 conv2d_scale_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; +"11 quantize_per_channel_default" -> "12 dequantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"12 dequantize_per_channel_default" -> "13 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"13 conv2d" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"13 conv2d" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; +"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; +"15 add_" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 cat" -> "18 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"18 conv2d_2" -> "19 add" [label="(1, 3, 3, 3)", style=solid]; +"19 add" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_tensor.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_tensor.dot index 01bfb0da32b..0efaf214887 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_tensor.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_0_per_tensor.dot @@ -1,40 +1,42 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 quantize_per_tensor_default" [id=8, type=quantize_per_tensor]; -"9 dequantize_per_tensor_default" [id=9, type=dequantize_per_tensor]; -"10 conv2d" [id=10, type=conv2d]; -"11 conv2d_1" [id=11, type=conv2d]; -"12 add_" [id=12, type=add_]; -"13 add__1" [id=13, type=add_]; -"14 add" [id=14, type=add]; -"15 conv2d_2" [id=15, type=conv2d]; -"16 add_1" [id=16, type=add]; -"17 output_1" [id=17, type=output]; -"0 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "15 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "12 add_" [label="(1,)", style=solid]; -"6 bias" -> "13 add__1" [label="(1,)", style=solid]; -"6 bias" -> "16 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 quantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; -"8 quantize_per_tensor_default" -> "9 dequantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; -"9 dequantize_per_tensor_default" -> "10 conv2d" 
[label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; -"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; -"12 add_" -> "14 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add__1" -> "14 add" [label="(1, 3, 3, 3)", style=solid]; -"14 add" -> "15 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"15 conv2d_2" -> "16 add_1" [label="(1, 3, 3, 3)", style=solid]; -"16 add_1" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 quantize_per_tensor_default" [id=9, type=quantize_per_tensor]; +"10 dequantize_per_tensor_default" [id=10, type=dequantize_per_tensor]; +"11 conv2d" [id=11, type=conv2d]; +"12 conv2d_1" [id=12, type=conv2d]; +"13 add_" [id=13, type=add_]; +"14 add__1" [id=14, type=add_]; +"15 cat" [id=15, type=cat]; +"16 conv2d_2" [id=16, type=conv2d]; +"17 add" [id=17, type=add]; +"18 output_1" [id=18, type=output]; +"0 const" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "11 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "11 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "12 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "12 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "16 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "16 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "13 add_" [label="(1,)", style=solid]; +"7 bias" -> "14 add__1" [label="(1,)", style=solid]; +"7 bias" -> "17 add" [label="(1,)", style=solid]; +"8 x" -> "9 quantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"9 quantize_per_tensor_default" -> "10 dequantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"10 dequantize_per_tensor_default" -> "11 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "12 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "13 add_" [label="(1, 3, 3, 3)", style=solid]; +"12 conv2d_1" -> "14 add__1" [label="(1, 3, 3, 3)", style=solid]; +"13 add_" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 add__1" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"15 cat" -> "16 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"16 conv2d_2" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; +"17 add" -> "18 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_channel.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_channel.dot index 1c8808cc8c6..00881a953e9 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_channel.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_channel.dot @@ -1,46 +1,48 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d_scale_0" [id=8, type=get_attr]; -"9 conv2d_zero_point_0" [id=9, type=get_attr]; 
-"10 quantize_per_channel_default" [id=10, type=quantize_per_channel]; -"11 dequantize_per_channel_default" [id=11, type=dequantize_per_channel]; -"12 conv2d" [id=12, type=conv2d]; -"13 conv2d_1" [id=13, type=conv2d]; -"14 add_" [id=14, type=add_]; -"15 add__1" [id=15, type=add_]; -"16 add" [id=16, type=add]; -"17 conv2d_2" [id=17, type=conv2d]; -"18 add_1" [id=18, type=add]; -"19 output_1" [id=19, type=output]; -"0 conv_a_weight" -> "10 quantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "12 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "17 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "14 add_" [label="(1,)", style=solid]; -"6 bias" -> "15 add__1" [label="(1,)", style=solid]; -"6 bias" -> "18 add_1" [label="(1,)", style=solid]; -"7 x" -> "12 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d_scale_0" -> "10 quantize_per_channel_default" [label="(1,)", style=solid]; -"8 conv2d_scale_0" -> "11 dequantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "10 quantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "11 dequantize_per_channel_default" [label="(1,)", style=solid]; -"10 quantize_per_channel_default" -> "11 dequantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; -"11 dequantize_per_channel_default" -> "12 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"12 conv2d" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"12 conv2d" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; -"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add__1" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add" -> "17 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"17 conv2d_2" -> "18 add_1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_1" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d_scale_0" [id=9, type=get_attr]; +"10 conv2d_zero_point_0" [id=10, type=get_attr]; +"11 quantize_per_channel_default" [id=11, type=quantize_per_channel]; +"12 dequantize_per_channel_default" [id=12, type=dequantize_per_channel]; +"13 conv2d" [id=13, type=conv2d]; +"14 conv2d_1" [id=14, type=conv2d]; +"15 add_" [id=15, type=add_]; +"16 add__1" [id=16, type=add_]; +"17 cat" [id=17, type=cat]; +"18 conv2d_2" [id=18, type=conv2d]; +"19 add" [id=19, type=add]; +"20 output_1" [id=20, type=output]; +"0 const" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "11 quantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "13 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "18 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "15 add_" [label="(1,)", style=solid]; +"7 bias" -> "16 add__1" 
[label="(1,)", style=solid]; +"7 bias" -> "19 add" [label="(1,)", style=solid]; +"8 x" -> "13 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d_scale_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; +"9 conv2d_scale_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; +"11 quantize_per_channel_default" -> "12 dequantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; +"12 dequantize_per_channel_default" -> "13 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"13 conv2d" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"13 conv2d" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; +"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; +"15 add_" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 cat" -> "18 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"18 conv2d_2" -> "19 add" [label="(1, 3, 3, 3)", style=solid]; +"19 add" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_tensor.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_tensor.dot index 62ab139712c..889befe8911 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_tensor.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_6_1_per_tensor.dot @@ -1,40 +1,42 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 quantize_per_tensor_default" [id=8, type=quantize_per_tensor]; -"9 dequantize_per_tensor_default" [id=9, type=dequantize_per_tensor]; -"10 conv2d" [id=10, type=conv2d]; -"11 conv2d_1" [id=11, type=conv2d]; -"12 add_" [id=12, type=add_]; -"13 add__1" [id=13, type=add_]; -"14 add" [id=14, type=add]; -"15 conv2d_2" [id=15, type=conv2d]; -"16 add_1" [id=16, type=add]; -"17 output_1" [id=17, type=output]; -"0 conv_a_weight" -> "8 quantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "11 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "11 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "15 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "15 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "12 add_" [label="(1,)", style=solid]; -"6 bias" -> "13 add__1" [label="(1,)", style=solid]; -"6 bias" -> "16 add_1" [label="(1,)", style=solid]; -"7 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 quantize_per_tensor_default" -> "9 dequantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; -"9 dequantize_per_tensor_default" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"10 conv2d" -> "11 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "12 add_" [label="(1, 3, 3, 3)", style=solid]; -"11 conv2d_1" -> "13 add__1" [label="(1, 3, 3, 3)", style=solid]; -"12 add_" -> "14 add" [label="(1, 3, 3, 3)", style=solid]; -"13 add__1" -> "14 add" [label="(1, 3, 3, 3)", style=solid]; -"14 add" -> "15 conv2d_2" 
[label="(1, 3, 3, 3)", style=solid]; -"15 conv2d_2" -> "16 add_1" [label="(1, 3, 3, 3)", style=solid]; -"16 add_1" -> "17 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 quantize_per_tensor_default" [id=9, type=quantize_per_tensor]; +"10 dequantize_per_tensor_default" [id=10, type=dequantize_per_tensor]; +"11 conv2d" [id=11, type=conv2d]; +"12 conv2d_1" [id=12, type=conv2d]; +"13 add_" [id=13, type=add_]; +"14 add__1" [id=14, type=add_]; +"15 cat" [id=15, type=cat]; +"16 conv2d_2" [id=16, type=conv2d]; +"17 add" [id=17, type=add]; +"18 output_1" [id=18, type=output]; +"0 const" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 quantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "11 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "12 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "12 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "16 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "16 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "13 add_" [label="(1,)", style=solid]; +"7 bias" -> "14 add__1" [label="(1,)", style=solid]; +"7 bias" -> "17 add" [label="(1,)", style=solid]; +"8 x" -> "11 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 quantize_per_tensor_default" -> "10 dequantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; +"10 dequantize_per_tensor_default" -> "11 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"11 conv2d" -> "12 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "13 add_" [label="(1, 3, 3, 3)", style=solid]; +"12 conv2d_1" -> "14 add__1" [label="(1, 3, 3, 3)", style=solid]; +"13 add_" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"14 add__1" -> "15 cat" [label="(1, 3, 3, 3)", style=solid]; +"15 cat" -> "16 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"16 conv2d_2" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; +"17 add" -> "18 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_channel.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_channel.dot index 5c9e516a456..0b7d122a946 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_channel.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_channel.dot @@ -1,50 +1,52 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d_scale_0" [id=8, type=get_attr]; -"9 conv2d_zero_point_0" [id=9, type=get_attr]; -"10 conv2d" [id=10, type=conv2d]; -"11 quantize_per_channel_default" [id=11, type=quantize_per_channel]; -"12 dequantize_per_channel_default_1" [id=12, type=dequantize_per_channel]; -"13 dequantize_per_channel_default" [id=13, type=dequantize_per_channel]; -"14 conv2d_1" [id=14, type=conv2d]; -"15 add_" [id=15, type=add_]; -"16 add__1" [id=16, type=add_]; -"17 add" 
[id=17, type=add]; -"18 conv2d_2" [id=18, type=conv2d]; -"19 add_1" [id=19, type=add]; -"20 output_1" [id=20, type=output]; -"0 conv_a_weight" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "18 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "15 add_" [label="(1,)", style=solid]; -"6 bias" -> "16 add__1" [label="(1,)", style=solid]; -"6 bias" -> "19 add_1" [label="(1,)", style=solid]; -"7 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d_scale_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; -"8 conv2d_scale_0" -> "12 dequantize_per_channel_default_1" [label="(1,)", style=solid]; -"8 conv2d_scale_0" -> "13 dequantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "12 dequantize_per_channel_default_1" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "13 dequantize_per_channel_default" [label="(1,)", style=solid]; -"10 conv2d" -> "11 quantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; -"11 quantize_per_channel_default" -> "12 dequantize_per_channel_default_1" [label="(1, 3, 3, 3)", style=solid]; -"11 quantize_per_channel_default" -> "13 dequantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; -"12 dequantize_per_channel_default_1" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; -"13 dequantize_per_channel_default" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; -"15 add_" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add__1" -> "17 add" [label="(1, 3, 3, 3)", style=solid]; -"17 add" -> "18 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"18 conv2d_2" -> "19 add_1" [label="(1, 3, 3, 3)", style=solid]; -"19 add_1" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d_scale_0" [id=9, type=get_attr]; +"10 conv2d_zero_point_0" [id=10, type=get_attr]; +"11 conv2d" [id=11, type=conv2d]; +"12 quantize_per_channel_default" [id=12, type=quantize_per_channel]; +"13 dequantize_per_channel_default_1" [id=13, type=dequantize_per_channel]; +"14 dequantize_per_channel_default" [id=14, type=dequantize_per_channel]; +"15 conv2d_1" [id=15, type=conv2d]; +"16 add_" [id=16, type=add_]; +"17 add__1" [id=17, type=add_]; +"18 cat" [id=18, type=cat]; +"19 conv2d_2" [id=19, type=conv2d]; +"20 add" [id=20, type=add]; +"21 output_1" [id=21, type=output]; +"0 const" -> "18 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "11 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "11 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "15 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "15 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "19 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "19 conv2d_2" [label="(3,)", style=solid]; 
+"7 bias" -> "16 add_" [label="(1,)", style=solid]; +"7 bias" -> "17 add__1" [label="(1,)", style=solid]; +"7 bias" -> "20 add" [label="(1,)", style=solid]; +"8 x" -> "11 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d_scale_0" -> "12 quantize_per_channel_default" [label="(1,)", style=solid]; +"9 conv2d_scale_0" -> "13 dequantize_per_channel_default_1" [label="(1,)", style=solid]; +"9 conv2d_scale_0" -> "14 dequantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "12 quantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "13 dequantize_per_channel_default_1" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "14 dequantize_per_channel_default" [label="(1,)", style=solid]; +"11 conv2d" -> "12 quantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"12 quantize_per_channel_default" -> "13 dequantize_per_channel_default_1" [label="(1, 3, 3, 3)", style=solid]; +"12 quantize_per_channel_default" -> "14 dequantize_per_channel_default" [label="(1, 3, 3, 3)", style=solid]; +"13 dequantize_per_channel_default_1" -> "16 add_" [label="(1, 3, 3, 3)", style=solid]; +"14 dequantize_per_channel_default" -> "15 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"15 conv2d_1" -> "17 add__1" [label="(1, 3, 3, 3)", style=solid]; +"16 add_" -> "18 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 add__1" -> "18 cat" [label="(1, 3, 3, 3)", style=solid]; +"18 cat" -> "19 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"19 conv2d_2" -> "20 add" [label="(1, 3, 3, 3)", style=solid]; +"20 add" -> "21 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_tensor.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_tensor.dot index b0d96c62313..5c1f4b3409b 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_tensor.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_insert_conv2d_7_None_per_tensor.dot @@ -1,42 +1,44 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d" [id=8, type=conv2d]; -"9 quantize_per_tensor_default" [id=9, type=quantize_per_tensor]; -"10 dequantize_per_tensor_default_1" [id=10, type=dequantize_per_tensor]; -"11 dequantize_per_tensor_default" [id=11, type=dequantize_per_tensor]; -"12 conv2d_1" [id=12, type=conv2d]; -"13 add_" [id=13, type=add_]; -"14 add__1" [id=14, type=add_]; -"15 add" [id=15, type=add]; -"16 conv2d_2" [id=16, type=conv2d]; -"17 add_1" [id=17, type=add]; -"18 output_1" [id=18, type=output]; -"0 conv_a_weight" -> "8 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "8 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "12 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "12 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "16 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "16 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "13 add_" [label="(1,)", style=solid]; -"6 bias" -> "14 add__1" [label="(1,)", style=solid]; -"6 bias" -> "17 add_1" [label="(1,)", style=solid]; -"7 x" -> "8 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d" -> "9 quantize_per_tensor_default" 
[label="(1, 3, 3, 3)", style=solid]; -"9 quantize_per_tensor_default" -> "10 dequantize_per_tensor_default_1" [label="(1, 3, 3, 3)", style=solid]; -"9 quantize_per_tensor_default" -> "11 dequantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; -"10 dequantize_per_tensor_default_1" -> "13 add_" [label="(1, 3, 3, 3)", style=solid]; -"11 dequantize_per_tensor_default" -> "12 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"12 conv2d_1" -> "14 add__1" [label="(1, 3, 3, 3)", style=solid]; -"13 add_" -> "15 add" [label="(1, 3, 3, 3)", style=solid]; -"14 add__1" -> "15 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add" -> "16 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"16 conv2d_2" -> "17 add_1" [label="(1, 3, 3, 3)", style=solid]; -"17 add_1" -> "18 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d" [id=9, type=conv2d]; +"10 quantize_per_tensor_default" [id=10, type=quantize_per_tensor]; +"11 dequantize_per_tensor_default_1" [id=11, type=dequantize_per_tensor]; +"12 dequantize_per_tensor_default" [id=12, type=dequantize_per_tensor]; +"13 conv2d_1" [id=13, type=conv2d]; +"14 add_" [id=14, type=add_]; +"15 add__1" [id=15, type=add_]; +"16 cat" [id=16, type=cat]; +"17 conv2d_2" [id=17, type=conv2d]; +"18 add" [id=18, type=add]; +"19 output_1" [id=19, type=output]; +"0 const" -> "16 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "9 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "17 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "14 add_" [label="(1,)", style=solid]; +"7 bias" -> "15 add__1" [label="(1,)", style=solid]; +"7 bias" -> "18 add" [label="(1,)", style=solid]; +"8 x" -> "9 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d" -> "10 quantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"10 quantize_per_tensor_default" -> "11 dequantize_per_tensor_default_1" [label="(1, 3, 3, 3)", style=solid]; +"10 quantize_per_tensor_default" -> "12 dequantize_per_tensor_default" [label="(1, 3, 3, 3)", style=solid]; +"11 dequantize_per_tensor_default_1" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; +"12 dequantize_per_tensor_default" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; +"14 add_" -> "16 cat" [label="(1, 3, 3, 3)", style=solid]; +"15 add__1" -> "16 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 cat" -> "17 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"17 conv2d_2" -> "18 add" [label="(1, 3, 3, 3)", style=solid]; +"18 add" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_channel.dot b/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_channel.dot index 38f5b8a0c91..7a87e882527 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_channel.dot +++ 
b/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_channel.dot @@ -1,56 +1,58 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 conv2d_scale_0" [id=8, type=get_attr]; -"9 conv2d_zero_point_0" [id=9, type=get_attr]; -"10 quantize_per_channel_default" [id=10, type=quantize_per_channel]; -"11 dequantize_per_channel_default" [id=11, type=dequantize_per_channel]; -"12 conv2d" [id=12, type=conv2d]; -"13 conv2d_1_scale_0" [id=13, type=get_attr]; -"14 conv2d_1_zero_point_0" [id=14, type=get_attr]; -"15 quantize_per_channel_default_1" [id=15, type=quantize_per_channel]; -"16 dequantize_per_channel_default_1" [id=16, type=dequantize_per_channel]; -"17 conv2d_1" [id=17, type=conv2d]; -"18 add_" [id=18, type=add_]; -"19 add__1" [id=19, type=add_]; -"20 add" [id=20, type=add]; -"21 conv2d_2" [id=21, type=conv2d]; -"22 add_1" [id=22, type=add]; -"23 output_1" [id=23, type=output]; -"0 conv_a_weight" -> "10 quantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "12 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "15 quantize_per_channel_default_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "17 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "21 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "21 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "18 add_" [label="(1,)", style=solid]; -"6 bias" -> "19 add__1" [label="(1,)", style=solid]; -"6 bias" -> "22 add_1" [label="(1,)", style=solid]; -"7 x" -> "12 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 conv2d_scale_0" -> "10 quantize_per_channel_default" [label="(1,)", style=solid]; -"8 conv2d_scale_0" -> "11 dequantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "10 quantize_per_channel_default" [label="(1,)", style=solid]; -"9 conv2d_zero_point_0" -> "11 dequantize_per_channel_default" [label="(1,)", style=solid]; -"10 quantize_per_channel_default" -> "11 dequantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; -"11 dequantize_per_channel_default" -> "12 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"12 conv2d" -> "17 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"12 conv2d" -> "18 add_" [label="(1, 3, 3, 3)", style=solid]; -"13 conv2d_1_scale_0" -> "15 quantize_per_channel_default_1" [label="(1,)", style=solid]; -"13 conv2d_1_scale_0" -> "16 dequantize_per_channel_default_1" [label="(1,)", style=solid]; -"14 conv2d_1_zero_point_0" -> "15 quantize_per_channel_default_1" [label="(1,)", style=solid]; -"14 conv2d_1_zero_point_0" -> "16 dequantize_per_channel_default_1" [label="(1,)", style=solid]; -"15 quantize_per_channel_default_1" -> "16 dequantize_per_channel_default_1" [label="(3, 3, 1, 1)", style=solid]; -"16 dequantize_per_channel_default_1" -> "17 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"17 conv2d_1" -> "19 add__1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_" -> "20 add" [label="(1, 3, 3, 3)", style=solid]; -"19 add__1" -> "20 add" [label="(1, 3, 3, 3)", style=solid]; -"20 add" -> "21 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"21 conv2d_2" -> "22 add_1" [label="(1, 3, 3, 3)", style=solid]; -"22 add_1" -> "23 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 
conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 conv2d_scale_0" [id=9, type=get_attr]; +"10 conv2d_zero_point_0" [id=10, type=get_attr]; +"11 quantize_per_channel_default" [id=11, type=quantize_per_channel]; +"12 dequantize_per_channel_default" [id=12, type=dequantize_per_channel]; +"13 conv2d" [id=13, type=conv2d]; +"14 conv2d_1_scale_0" [id=14, type=get_attr]; +"15 conv2d_1_zero_point_0" [id=15, type=get_attr]; +"16 quantize_per_channel_default_1" [id=16, type=quantize_per_channel]; +"17 dequantize_per_channel_default_1" [id=17, type=dequantize_per_channel]; +"18 conv2d_1" [id=18, type=conv2d]; +"19 add_" [id=19, type=add_]; +"20 add__1" [id=20, type=add_]; +"21 cat" [id=21, type=cat]; +"22 conv2d_2" [id=22, type=conv2d]; +"23 add" [id=23, type=add]; +"24 output_1" [id=24, type=output]; +"0 const" -> "21 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "11 quantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "13 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "16 quantize_per_channel_default_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "18 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "22 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "22 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "19 add_" [label="(1,)", style=solid]; +"7 bias" -> "20 add__1" [label="(1,)", style=solid]; +"7 bias" -> "23 add" [label="(1,)", style=solid]; +"8 x" -> "13 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 conv2d_scale_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; +"9 conv2d_scale_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "11 quantize_per_channel_default" [label="(1,)", style=solid]; +"10 conv2d_zero_point_0" -> "12 dequantize_per_channel_default" [label="(1,)", style=solid]; +"11 quantize_per_channel_default" -> "12 dequantize_per_channel_default" [label="(3, 3, 1, 1)", style=solid]; +"12 dequantize_per_channel_default" -> "13 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"13 conv2d" -> "18 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"13 conv2d" -> "19 add_" [label="(1, 3, 3, 3)", style=solid]; +"14 conv2d_1_scale_0" -> "16 quantize_per_channel_default_1" [label="(1,)", style=solid]; +"14 conv2d_1_scale_0" -> "17 dequantize_per_channel_default_1" [label="(1,)", style=solid]; +"15 conv2d_1_zero_point_0" -> "16 quantize_per_channel_default_1" [label="(1,)", style=solid]; +"15 conv2d_1_zero_point_0" -> "17 dequantize_per_channel_default_1" [label="(1,)", style=solid]; +"16 quantize_per_channel_default_1" -> "17 dequantize_per_channel_default_1" [label="(3, 3, 1, 1)", style=solid]; +"17 dequantize_per_channel_default_1" -> "18 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"18 conv2d_1" -> "20 add__1" [label="(1, 3, 3, 3)", style=solid]; +"19 add_" -> "21 cat" [label="(1, 3, 3, 3)", style=solid]; +"20 add__1" -> "21 cat" [label="(1, 3, 3, 3)", style=solid]; +"21 cat" -> "22 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"22 conv2d_2" -> "23 add" [label="(1, 3, 3, 3)", style=solid]; +"23 add" -> "24 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_tensor.dot 
b/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_tensor.dot index 952b7087ef5..04da21545ba 100644 --- a/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_tensor.dot +++ b/tests/torch/data/reference_graphs/fx/transformed/qdq_shared_insert_weights_per_tensor.dot @@ -1,44 +1,46 @@ strict digraph { -"0 conv_a_weight" [id=0, type=get_attr]; -"1 conv_a_bias" [id=1, type=get_attr]; -"2 conv_b_weight" [id=2, type=get_attr]; -"3 conv_b_bias" [id=3, type=get_attr]; -"4 conv_c_weight" [id=4, type=get_attr]; -"5 conv_c_bias" [id=5, type=get_attr]; -"6 bias" [id=6, type=get_attr]; -"7 x" [id=7, type=input]; -"8 quantize_per_tensor_default" [id=8, type=quantize_per_tensor]; -"9 dequantize_per_tensor_default" [id=9, type=dequantize_per_tensor]; -"10 conv2d" [id=10, type=conv2d]; -"11 quantize_per_tensor_default_1" [id=11, type=quantize_per_tensor]; -"12 dequantize_per_tensor_default_1" [id=12, type=dequantize_per_tensor]; -"13 conv2d_1" [id=13, type=conv2d]; -"14 add_" [id=14, type=add_]; -"15 add__1" [id=15, type=add_]; -"16 add" [id=16, type=add]; -"17 conv2d_2" [id=17, type=conv2d]; -"18 add_1" [id=18, type=add]; -"19 output_1" [id=19, type=output]; -"0 conv_a_weight" -> "8 quantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; -"1 conv_a_bias" -> "10 conv2d" [label="(3,)", style=solid]; -"2 conv_b_weight" -> "11 quantize_per_tensor_default_1" [label="(3, 3, 1, 1)", style=solid]; -"3 conv_b_bias" -> "13 conv2d_1" [label="(3,)", style=solid]; -"4 conv_c_weight" -> "17 conv2d_2" [label="(3, 3, 1, 1)", style=solid]; -"5 conv_c_bias" -> "17 conv2d_2" [label="(3,)", style=solid]; -"6 bias" -> "14 add_" [label="(1,)", style=solid]; -"6 bias" -> "15 add__1" [label="(1,)", style=solid]; -"6 bias" -> "18 add_1" [label="(1,)", style=solid]; -"7 x" -> "10 conv2d" [label="(1, 3, 3, 3)", style=solid]; -"8 quantize_per_tensor_default" -> "9 dequantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; -"9 dequantize_per_tensor_default" -> "10 conv2d" [label="(3, 3, 1, 1)", style=solid]; -"10 conv2d" -> "13 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; -"10 conv2d" -> "14 add_" [label="(1, 3, 3, 3)", style=solid]; -"11 quantize_per_tensor_default_1" -> "12 dequantize_per_tensor_default_1" [label="(3, 3, 1, 1)", style=solid]; -"12 dequantize_per_tensor_default_1" -> "13 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; -"13 conv2d_1" -> "15 add__1" [label="(1, 3, 3, 3)", style=solid]; -"14 add_" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"15 add__1" -> "16 add" [label="(1, 3, 3, 3)", style=solid]; -"16 add" -> "17 conv2d_2" [label="(1, 3, 3, 3)", style=solid]; -"17 conv2d_2" -> "18 add_1" [label="(1, 3, 3, 3)", style=solid]; -"18 add_1" -> "19 output_1" [label="(1, 3, 3, 3)", style=solid]; +"0 const" [id=0, type=get_attr]; +"1 conv_a_weight" [id=1, type=get_attr]; +"2 conv_a_bias" [id=2, type=get_attr]; +"3 conv_b_weight" [id=3, type=get_attr]; +"4 conv_b_bias" [id=4, type=get_attr]; +"5 conv_c_weight" [id=5, type=get_attr]; +"6 conv_c_bias" [id=6, type=get_attr]; +"7 bias" [id=7, type=get_attr]; +"8 x" [id=8, type=input]; +"9 quantize_per_tensor_default" [id=9, type=quantize_per_tensor]; +"10 dequantize_per_tensor_default" [id=10, type=dequantize_per_tensor]; +"11 conv2d" [id=11, type=conv2d]; +"12 quantize_per_tensor_default_1" [id=12, type=quantize_per_tensor]; +"13 dequantize_per_tensor_default_1" [id=13, type=dequantize_per_tensor]; +"14 conv2d_1" [id=14, type=conv2d]; +"15 add_" [id=15, type=add_]; +"16 add__1" [id=16, 
type=add_]; +"17 cat" [id=17, type=cat]; +"18 conv2d_2" [id=18, type=conv2d]; +"19 add" [id=19, type=add]; +"20 output_1" [id=20, type=output]; +"0 const" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"1 conv_a_weight" -> "9 quantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; +"2 conv_a_bias" -> "11 conv2d" [label="(3,)", style=solid]; +"3 conv_b_weight" -> "12 quantize_per_tensor_default_1" [label="(3, 3, 1, 1)", style=solid]; +"4 conv_b_bias" -> "14 conv2d_1" [label="(3,)", style=solid]; +"5 conv_c_weight" -> "18 conv2d_2" [label="(3, 9, 1, 1)", style=solid]; +"6 conv_c_bias" -> "18 conv2d_2" [label="(3,)", style=solid]; +"7 bias" -> "15 add_" [label="(1,)", style=solid]; +"7 bias" -> "16 add__1" [label="(1,)", style=solid]; +"7 bias" -> "19 add" [label="(1,)", style=solid]; +"8 x" -> "11 conv2d" [label="(1, 3, 3, 3)", style=solid]; +"9 quantize_per_tensor_default" -> "10 dequantize_per_tensor_default" [label="(3, 3, 1, 1)", style=solid]; +"10 dequantize_per_tensor_default" -> "11 conv2d" [label="(3, 3, 1, 1)", style=solid]; +"11 conv2d" -> "14 conv2d_1" [label="(1, 3, 3, 3)", style=solid]; +"11 conv2d" -> "15 add_" [label="(1, 3, 3, 3)", style=solid]; +"12 quantize_per_tensor_default_1" -> "13 dequantize_per_tensor_default_1" [label="(3, 3, 1, 1)", style=solid]; +"13 dequantize_per_tensor_default_1" -> "14 conv2d_1" [label="(3, 3, 1, 1)", style=solid]; +"14 conv2d_1" -> "16 add__1" [label="(1, 3, 3, 3)", style=solid]; +"15 add_" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"16 add__1" -> "17 cat" [label="(1, 3, 3, 3)", style=solid]; +"17 cat" -> "18 conv2d_2" [label="(1, 9, 3, 3)", style=solid]; +"18 conv2d_2" -> "19 add" [label="(1, 3, 3, 3)", style=solid]; +"19 add" -> "20 output_1" [label="(1, 3, 3, 3)", style=solid]; } diff --git a/tests/torch/data/reference_graphs/fx/yolo11n_sdpa_block.dot b/tests/torch/data/reference_graphs/fx/yolo11n_sdpa_block.dot new file mode 100644 index 00000000000..5a06e9017ab --- /dev/null +++ b/tests/torch/data/reference_graphs/fx/yolo11n_sdpa_block.dot @@ -0,0 +1,41 @@ +strict digraph { +"0 kqv_weight" [id=0, type=get_attr]; +"1 x" [id=1, type=input]; +"2 linear" [id=2, type=linear]; +"3 slice_1" [id=3, type=slice]; +"4 slice_2" [id=4, type=slice]; +"5 slice_3" [id=5, type=slice]; +"6 slice_4" [id=6, type=slice]; +"7 slice_5" [id=7, type=slice]; +"8 slice_6" [id=8, type=slice]; +"9 slice_7" [id=9, type=slice]; +"10 slice_8" [id=10, type=slice]; +"11 slice_9" [id=11, type=slice]; +"12 transpose" [id=12, type=transpose]; +"13 matmul" [id=13, type=matmul]; +"14 div_" [id=14, type=div_]; +"15 softmax" [id=15, type=softmax]; +"16 transpose_1" [id=16, type=transpose]; +"17 matmul_1" [id=17, type=matmul]; +"18 output_1" [id=18, type=output]; +"0 kqv_weight" -> "2 linear" [label="(12, 4)", style=solid]; +"1 x" -> "2 linear" [label="(1, 2, 4)", style=solid]; +"2 linear" -> "3 slice_1" [label="(1, 2, 12)", style=solid]; +"2 linear" -> "6 slice_4" [label="(1, 2, 12)", style=solid]; +"2 linear" -> "9 slice_7" [label="(1, 2, 12)", style=solid]; +"3 slice_1" -> "4 slice_2" [label="(1, 2, 12)", style=solid]; +"4 slice_2" -> "5 slice_3" [label="(1, 2, 12)", style=solid]; +"5 slice_3" -> "13 matmul" [label="(1, 2, 4)", style=solid]; +"6 slice_4" -> "7 slice_5" [label="(1, 2, 12)", style=solid]; +"7 slice_5" -> "8 slice_6" [label="(1, 2, 12)", style=solid]; +"8 slice_6" -> "12 transpose" [label="(1, 2, 4)", style=solid]; +"9 slice_7" -> "10 slice_8" [label="(1, 2, 12)", style=solid]; +"10 slice_8" -> "11 slice_9" [label="(1, 2, 12)", style=solid]; 
+"11 slice_9" -> "17 matmul_1" [label="(1, 2, 4)", style=solid]; +"12 transpose" -> "13 matmul" [label="(1, 4, 2)", style=solid]; +"13 matmul" -> "14 div_" [label="(1, 2, 2)", style=solid]; +"14 div_" -> "15 softmax" [label="(1, 2, 2)", style=solid]; +"15 softmax" -> "16 transpose_1" [label="(1, 2, 2)", style=solid]; +"16 transpose_1" -> "17 matmul_1" [label="(1, 2, 2)", style=solid]; +"17 matmul_1" -> "18 output_1" [label="(1, 2, 4)", style=solid]; +} diff --git a/tests/torch/fx/test_compress_weights.py b/tests/torch/fx/test_compress_weights.py index 874a6c64d16..0de35aef29e 100644 --- a/tests/torch/fx/test_compress_weights.py +++ b/tests/torch/fx/test_compress_weights.py @@ -274,9 +274,9 @@ def test_get_dtype_attribute_of_parameter(): dummy_input = torch.randint(0, 10, [3, 3]) exported_model = get_torch_fx_model(model, dummy_input) compressed_model = compress_weights(exported_model) - assert compressed_model.matmul_updated_constant0.dtype == torch.uint8 + assert compressed_model.weight_updated_constant0.dtype == torch.uint8 compressed_model(dummy_input) - assert compressed_model.matmul_updated_constant0.dtype == torch.uint8 + assert compressed_model.weight_updated_constant0.dtype == torch.uint8 @pytest.mark.parametrize("dtype", ("float16", "float32")) @@ -295,6 +295,6 @@ def test_model_devices_and_precisions(use_cuda, dtype): result = compressed_model(dummy_input) # Scale should always be in float16 - assert compressed_model.state_dict()["asymmetric_weights_decompressor_matmul._scale"].dtype == torch.float16 + assert compressed_model.state_dict()["asymmetric_weights_decompressor_w._scale"].dtype == torch.float16 # Result should be in the precision of the model assert result.dtype == dtype diff --git a/tests/torch/fx/test_model_transformer.py b/tests/torch/fx/test_model_transformer.py index d42d52f4d8d..23039ee99ef 100644 --- a/tests/torch/fx/test_model_transformer.py +++ b/tests/torch/fx/test_model_transformer.py @@ -36,6 +36,7 @@ from nncf.experimental.torch.fx.node_utils import get_graph_node_by_name from nncf.experimental.torch.fx.node_utils import get_tensor_constant_from_node from nncf.experimental.torch.fx.transformations import _get_connected_nodes +from nncf.experimental.torch.fx.transformations import _get_node_by_input_port_id from nncf.experimental.torch.fx.transformations import _set_new_node_meta from nncf.experimental.torch.fx.transformations import bias_update_transformation_builder from nncf.experimental.torch.fx.transformations import compress_post_quantize_transformation @@ -54,6 +55,7 @@ from tests.torch.test_models.synthetic import ConvolutionWithAllConstantInputsModel from tests.torch.test_models.synthetic import ConvolutionWithNotTensorBiasModel from tests.torch.test_models.synthetic import MultiBranchesConnectedModel +from tests.torch.test_models.synthetic import MultiBranchesConnectedModelWithConcat @dataclass @@ -118,11 +120,13 @@ def test_model_extraction(test_case: ModelExtractionTestCase): check_graph(nncf_graph, f"{get_test_id(test_case)}.dot", EXTRACTED_GRAPHS_DIR_NAME, extended=True) -MultiBranchesConnectedModel_TARGET_POINTS = ( +MultiBranchesConnectedModelWithConcat_TARGET_POINTS = ( PTTargetPoint(TargetType.OPERATOR_PRE_HOOK, "conv2d", input_port_id=0), PTTargetPoint(TargetType.OPERATOR_PRE_HOOK, "conv2d", input_port_id=1), PTTargetPoint(TargetType.OPERATION_WITH_WEIGHTS, "conv2d_1", input_port_id=1), PTTargetPoint(TargetType.OPERATOR_POST_HOOK, "conv2d"), + PTTargetPoint(TargetType.OPERATOR_PRE_HOOK, "cat", input_port_id=1), + 
PTTargetPoint(TargetType.OPERATION_WITH_WEIGHTS, "cat", input_port_id=2), ) @@ -132,14 +136,14 @@ class TestInsertModule(torch.nn.Module): def forward(self, x): return x + 1 - target_points = list(MultiBranchesConnectedModel_TARGET_POINTS) + target_points = list(MultiBranchesConnectedModelWithConcat_TARGET_POINTS) target_node_name = "TEST_MODULE" test_module_instance = TestInsertModule() builder = leaf_module_insertion_transformation_builder if leaf else module_insertion_transformation_builder transformation = builder(test_module_instance, target_points, target_node_name) - model = MultiBranchesConnectedModel() - captured_model = get_torch_fx_model(model, torch.ones((1, 3, 3, 3))) + model = MultiBranchesConnectedModelWithConcat() + captured_model = get_torch_fx_model(model, torch.ones(MultiBranchesConnectedModelWithConcat.INPUT_SIZE)) transformation(captured_model) nncf_graph = GraphConverter.create_nncf_graph(captured_model) @@ -147,14 +151,40 @@ def forward(self, x): check_graph(nncf_graph, f"model_insertion{'_leaf' if leaf else ''}.dot", TRANSFORMED_GRAPH_DIR_NAME, extended=True) -@pytest.mark.parametrize("bias", [True, False], ids=["bias", "constant"]) -def test_constant_update_transformation(bias: bool): - model = MultiBranchesConnectedModel() - captured_model = get_torch_fx_model(model, torch.ones((1, 3, 3, 3))) +@pytest.mark.parametrize("concat", [False, True]) +def test_constant_update_transformation(concat: bool): + model = MultiBranchesConnectedModelWithConcat() + captured_model = get_torch_fx_model(model, torch.ones(MultiBranchesConnectedModelWithConcat.INPUT_SIZE)) nncf_graph = GraphConverter.create_nncf_graph(captured_model) - target_node = nncf_graph.get_node_by_name("conv2d" if bias else "add_") + target_node_name = "cat" if concat else "add_" + target_node = nncf_graph.get_node_by_name(target_node_name) + input_port_id = 2 if concat else 1 - builder = bias_update_transformation_builder if bias else constant_update_transformation_builder + builder = constant_update_transformation_builder + new_value = torch.tensor((42.0,)) + transformation = builder(target_node, value=new_value, input_port_id=input_port_id) + transformation(captured_model) + + target_graph_node = get_graph_node_by_name(captured_model.graph, target_node_name) + new_const_node = _get_node_by_input_port_id(target_graph_node, input_port_id) + assert get_tensor_constant_from_node(new_const_node, captured_model) == new_value + + transformed_nncf_graph = GraphConverter.create_nncf_graph(captured_model) + check_graph( + transformed_nncf_graph, + f"{'cat_' if concat else ''}constant_update.dot", + TRANSFORMED_GRAPH_DIR_NAME, + extended=True, + ) + + +def test_bias_update_transformation(): + model = MultiBranchesConnectedModelWithConcat() + captured_model = get_torch_fx_model(model, torch.ones(MultiBranchesConnectedModelWithConcat.INPUT_SIZE)) + nncf_graph = GraphConverter.create_nncf_graph(captured_model) + target_node = nncf_graph.get_node_by_name("conv2d") + + builder = bias_update_transformation_builder new_value = torch.tensor((42.0,)) transformation = builder(target_node, value=new_value, input_port_id=1) transformation(captured_model) @@ -216,7 +246,7 @@ def _check_qdq_params( ): target_node = get_graph_node_by_name(captured_model.graph, target_point.target_node_name) if target_point.target_type in [TargetType.OPERATION_WITH_WEIGHTS, TargetType.OPERATOR_PRE_HOOK]: - dq_node = target_node.args[target_point.input_port_id] + dq_node = _get_node_by_input_port_id(target_node, target_point.input_port_id) q_node = 
dq_node.args[0] else: q_node = list(target_node.users)[0] @@ -240,7 +270,7 @@ def get_value(node: torch.fx.Node): assert get_value(dq_node.args[2]) == self.REF_ZERO_POINT assert dq_node.args[-1] == ref_dtype - @pytest.mark.parametrize("target_point", MultiBranchesConnectedModel_TARGET_POINTS) + @pytest.mark.parametrize("target_point", MultiBranchesConnectedModelWithConcat_TARGET_POINTS) def test_one_target_point( self, is_per_channel: bool, @@ -254,8 +284,8 @@ def test_one_target_point( quantizer = self._get_quantizer(is_per_channel, symmetric, q_min, q_max, dtype) transformation = qdq_insertion_transformation_builder(quantizer, [target_point]) - model = MultiBranchesConnectedModel() - captured_model = get_torch_fx_model(model, torch.ones((1, 3, 3, 3))) + model = MultiBranchesConnectedModelWithConcat() + captured_model = get_torch_fx_model(model, torch.ones(MultiBranchesConnectedModelWithConcat.INPUT_SIZE)) transformation(captured_model) self._check_qdq_params(captured_model, target_point, dtype, is_per_channel) @@ -306,8 +336,8 @@ def test_shared_target_point( quantizer = self._get_quantizer(is_per_channel, symmetric, q_min, q_max, dtype) transformation = qdq_insertion_transformation_builder(quantizer, target_points) - model = MultiBranchesConnectedModel() - captured_model = get_torch_fx_model(model, torch.ones((1, 3, 3, 3))) + model = MultiBranchesConnectedModelWithConcat() + captured_model = get_torch_fx_model(model, torch.ones(MultiBranchesConnectedModelWithConcat.INPUT_SIZE)) if not weights: with pytest.raises(nncf.InternalError): transformation(captured_model) @@ -343,10 +373,10 @@ def test_node_removal_transformation(): @pytest.mark.parametrize("tuple_output", [False, True], ids=["node_out", "tuple_out"]) -@pytest.mark.parametrize("target_point", MultiBranchesConnectedModel_TARGET_POINTS) +@pytest.mark.parametrize("target_point", MultiBranchesConnectedModelWithConcat_TARGET_POINTS) def test_output_insertion_transformation(tuple_output: bool, target_point: PTTargetPoint): - model = MultiBranchesConnectedModel() - captured_model = get_torch_fx_model(model, torch.ones((1, 3, 3, 3))) + model = MultiBranchesConnectedModelWithConcat() + captured_model = get_torch_fx_model(model, torch.ones(MultiBranchesConnectedModelWithConcat.INPUT_SIZE)) if not tuple_output: output_node = [node for node in captured_model.graph.nodes if node.op == "output"][0] diff --git a/tests/torch/fx/test_models.py b/tests/torch/fx/test_models.py index a43c4fafb56..ad42a949d8c 100644 --- a/tests/torch/fx/test_models.py +++ b/tests/torch/fx/test_models.py @@ -43,6 +43,7 @@ from tests.torch.test_compressed_graph import check_graph from tests.torch.test_models.synthetic import MultiBranchesConnectedModel from tests.torch.test_models.synthetic import ShortTransformer +from tests.torch.test_models.synthetic import YOLO11N_SDPABlock FX_DIR_NAME = Path("fx") FX_QUANTIZED_DIR_NAME = Path("fx") / "quantized" @@ -68,6 +69,7 @@ def torchvision_model_case(model_id: str, input_shape: Tuple[int,]): torchvision_model_case("swin_v2_s", (1, 3, 224, 224)), ModelCase(test_models.UNet, "unet", [1, 3, 224, 224]), ModelCase(partial(ShortTransformer, 5, 10), "synthetic_transformer", [5]), + ModelCase(YOLO11N_SDPABlock, "yolo11n_sdpa_block", YOLO11N_SDPABlock.INPUT_SIZE), ) @@ -161,6 +163,11 @@ def test_model(test_case: ModelCase): {"model_type": nncf.ModelType.TRANSFORMER}, [(4, 4), (2, 2)], ), + ( + ModelCase(YOLO11N_SDPABlock, "yolo11n_sdpa_block", YOLO11N_SDPABlock.INPUT_SIZE), + {"model_type": nncf.ModelType.TRANSFORMER}, + [(4, 4), 
(3, 3)],
+    ),
 )
 
 
diff --git a/tests/torch/test_models/synthetic.py b/tests/torch/test_models/synthetic.py
index 2074f1a31f2..c999deedfce 100644
--- a/tests/torch/test_models/synthetic.py
+++ b/tests/torch/test_models/synthetic.py
@@ -553,6 +553,26 @@ def forward(self, x):
         return self.conv_c(y) + self.bias
 
 
+class MultiBranchesConnectedModelWithConcat(torch.nn.Module):
+    INPUT_SIZE = (1, 3, 3, 3)
+
+    def __init__(self):
+        super().__init__()
+        self.conv_a = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1)
+        self.conv_b = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1)
+        self.conv_c = nn.Conv2d(in_channels=9, out_channels=3, kernel_size=1)
+        self.const = nn.Parameter(torch.ones(self.INPUT_SIZE))
+        self.bias = torch.tensor([1])
+
+    def forward(self, x):
+        a = self.conv_a(x)
+        b = self.conv_b(a)
+        a += self.bias
+        b += self.bias
+        y = torch.cat([a, b, self.const], dim=1)
+        return self.conv_c(y) + self.bias
+
+
 class LinearPTQParamsTestModel(nn.Module):
     INPUT_SIZE = None
 
@@ -609,3 +629,21 @@ def forward(self, input_ids):
         x = self.linear(x)
         res = self.lm_head(x)
         return res
+
+
+class YOLO11N_SDPABlock(torch.nn.Module):
+    INPUT_SIZE = (1, 2, 4)
+
+    def __init__(self):
+        super().__init__()
+        self.kqv = nn.Linear(4, 12, bias=False)
+
+    def forward(self, x):
+        x = self.kqv(x)
+        k = x[:, :, :4]
+        q = x[:, :, 4:8]
+        v = x[:, :, 8:]
+        kq = torch.matmul(k, torch.transpose(q, 1, 2))
+        kq /= 2**-2
+        kq = torch.softmax(kq, -1)
+        return torch.matmul(torch.transpose(kq, 1, 2), v)
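
Usage sketch (not part of the patch): the new YOLO11N_SDPABlock synthetic model is exercised in tests/torch/fx/test_models.py with model_type=nncf.ModelType.TRANSFORMER. Below is a minimal, hedged illustration of that flow; it assumes the public torch.export and nncf.quantize entry points in place of the test helpers (get_torch_fx_model, check_graph), so treat it as an approximation of the test setup rather than the exact code path used by the suite.

# Assumption-labeled sketch: capture the SDPA block as a torch.fx.GraphModule and
# quantize it as a transformer-type model, mirroring the new yolo11n_sdpa_block test case.
import torch
import nncf
from tests.torch.test_models.synthetic import YOLO11N_SDPABlock

model = YOLO11N_SDPABlock().eval()
example_input = torch.ones(YOLO11N_SDPABlock.INPUT_SIZE)

# torch.export is assumed here as a stand-in for the tests' get_torch_fx_model helper.
captured_model = torch.export.export(model, (example_input,)).module()

# A single-sample calibration dataset is enough for this toy block.
calibration_dataset = nncf.Dataset([example_input])
quantized_model = nncf.quantize(
    captured_model,
    calibration_dataset,
    model_type=nncf.ModelType.TRANSFORMER,  # same model_type the new test case passes
)
quantized_model(example_input)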