From 1b28c78d3468d9d7d584f97220844f8173dc53a5 Mon Sep 17 00:00:00 2001
From: Anton Voronov
Date: Tue, 5 Dec 2023 07:37:24 +0100
Subject: [PATCH] fixed dynamic shape test cases

---
 .../intel_cpu/src/nodes/fullyconnected.cpp    | 24 ++++++++++++++++++-
 src/plugins/intel_cpu/src/utils/cpu_utils.hpp | 17 +++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
index d11ed228d9922c..934585c7c6559f 100644
--- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
+++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -464,11 +464,33 @@ void FullyConnected::prepareWeightsUsingDummyShape() {
     if (selected_pd == nullptr)
         OPENVINO_THROW("Preferable primitive descriptor is not set for node ", getName(), ".");
 
-    auto inDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(*getBaseMemDescAtInputPort(DATA_ID)));
+    DnnlMemoryDescPtr inDesc = nullptr;
     auto weightDesc = MemoryDescUtils::convertToDnnlMemoryDesc(weightDescIP);
     auto biasDesc = withBiases ? MemoryDescUtils::convertToDnnlMemoryDesc(getBaseMemDescAtInputPort(BIAS_ID)) : nullptr;
     auto outDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(*getBaseMemDescAtOutputPort(0)));
 
+    Shape newInShape = getBaseMemDescAtInputPort(DATA_ID)->getShape();
+    if (isDynamicNode()) {
+        auto originalInDesc = getBaseMemDescAtInputPort(DATA_ID);
+        auto originalInDims = originalInDesc->getShape().getDims();
+        size_t dimIdx = originalInDims.size() == 3 ? 1 : 0;
+        // Propagate N dim from the output shape to the input shape
+        if (newInShape.getDims()[dimIdx] == Shape::UNDEFINED_DIM &&
+            getBaseMemDescAtOutputPort(0)->getShape().getDims()[dimIdx] != Shape::UNDEFINED_DIM) {
+            newInShape = cloneShapeWithNewDim(newInShape, getBaseMemDescAtOutputPort(0)->getShape().getDims()[dimIdx], dimIdx);
+        }
+        // Propagate K dim from the weights shape to the input shape
+        if (newInShape.getDims()[dimIdx+1] == Shape::UNDEFINED_DIM &&
+            weightDesc->getShape().getDims()[1] != Shape::UNDEFINED_DIM) {
+            newInShape = cloneShapeWithNewDim(newInShape, weightDesc->getShape().getDims()[1], dimIdx+1);
+        }
+
+        auto newInDesc = DnnlBlockedMemoryDesc(originalInDesc->getPrecision(), MemoryDescUtils::makeDummyShape(newInShape));
+        inDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(newInDesc));
+    } else {
+        inDesc = MemoryDescUtils::convertToDnnlMemoryDesc(MemoryDescUtils::makeDummyDesc(*getBaseMemDescAtInputPort(DATA_ID)));
+    }
+
     const FCKey key = {inDesc,
                        weightDesc,
                        biasDesc,
diff --git a/src/plugins/intel_cpu/src/utils/cpu_utils.hpp b/src/plugins/intel_cpu/src/utils/cpu_utils.hpp
index c2f7e867956382..a89a44721bdaad 100644
--- a/src/plugins/intel_cpu/src/utils/cpu_utils.hpp
+++ b/src/plugins/intel_cpu/src/utils/cpu_utils.hpp
@@ -48,6 +48,23 @@ inline std::vector<size_t> getNormalizedDimsBySize(const VectorDims &dims, size_
     return normalizedDims;
 }
 
+/**
+* @brief Clones the passed shape and replaces one of its dimensions.
+* @param originalShape
+* shape to clone
+* @param newDimValue
+* new dimension value
+* @param dim
+* dimension index
+* @return cloned shape
+*/
+inline Shape cloneShapeWithNewDim(Shape originalShape, Dim newDimValue, size_t dim) {
+    VectorDims newDims = originalShape.getDims();
+    assert(dim < newDims.size());
+    newDims[dim] = newDimValue;
+    return Shape(originalShape.getMinDims(), newDims);
+}
+
 /**
 * @brief Checked that secondInputDims unidirectional broadcastable per tensor or per channel to firstInputDims
 * @param firstInputDims
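
Reviewer note, not part of the patch: a minimal usage sketch of the new cloneShapeWithNewDim() helper. The include paths, the example function name, and the concrete dimension values below are illustrative assumptions; only the helper itself and the Shape / Dim / VectorDims types it relies on come from the repository.

// Usage sketch (assumes cpu_shape.h / cpu_types.h from src/plugins/intel_cpu/src
// and the helper added to utils/cpu_utils.hpp by this patch).
#include "cpu_shape.h"
#include "utils/cpu_utils.hpp"

using namespace ov::intel_cpu;

void cloneShapeWithNewDimSketch() {
    // A dynamic 3D activation shape: lower bounds {1, 1, 64},
    // upper bounds {undefined, undefined, 64}.
    Shape inShape(VectorDims{1, 1, 64},
                  VectorDims{Shape::UNDEFINED_DIM, Shape::UNDEFINED_DIM, 64});

    // Pin dimension 1 (the M dim of a 3D fully-connected input) to 128.
    // The helper copies getDims(), overwrites index 1, and rebuilds the shape
    // from the original lower bounds plus the updated dims.
    Shape pinned = cloneShapeWithNewDim(inShape, 128, 1);
}

In the fullyconnected.cpp hunk above, the shape produced this way is passed to MemoryDescUtils::makeDummyShape(), so the N/K values propagated from the output and weights shapes are taken into account when the dummy input descriptor is built.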