Commit b656fee
[GPU] Fix priorbox legacy code in CreateOp() and allow max_size to be null (openvinotoolkit#19143)
wilson-seok authored Aug 16, 2023
1 parent b4f6148 commit b656fee
Showing 3 changed files with 86 additions and 55 deletions.
@@ -121,52 +121,56 @@ KERNEL(ref)
#endif
    }

-    for (uint ms_idx = 0; ms_idx < MIN_SIZE_SIZE; ++ms_idx) {
-        box_width = MIN_SIZE[ms_idx] * 0.5f;
-        box_height = MIN_SIZE[ms_idx] * 0.5f;
-        FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
-#ifdef MIN_MAX_ASPECT_RATIO_ORDER
-        if (MAX_SIZE_SIZE > ms_idx) {
-            box_width = box_height = sqrt(MIN_SIZE[ms_idx] * MAX_SIZE[ms_idx]) * 0.5f;
-            FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
-        }
-
-        if (SCALE_ALL_SIZES || (!SCALE_ALL_SIZES && (ms_idx == MIN_SIZE_SIZE - 1))) {
-            uint s_idx = SCALE_ALL_SIZES ? ms_idx : 0;
-            for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
-                OUTPUT_TYPE ar = ASPECT_RATIO[k];
-                if (fabs(ar - 1.0f) < 1e-6) {
-                    continue;
-                }
-
-                ar = sqrt(ar);
-                box_width = MIN_SIZE[s_idx] * 0.5f * ar;
-                box_height = MIN_SIZE[s_idx] * 0.5f / ar;
-                FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
-            }
-        }
-#else
-        if (SCALE_ALL_SIZES || (!SCALE_ALL_SIZES && (ms_idx == MIN_SIZE_SIZE - 1))) {
-            uint s_idx = SCALE_ALL_SIZES ? ms_idx : 0;
-            for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
-                OUTPUT_TYPE ar = ASPECT_RATIO[k];
-                if (fabs(ar - 1.0f) < 1e-6) {
-                    continue;
-                };
-
-                ar = sqrt(ar);
-                box_width = MIN_SIZE[s_idx] * 0.5f * ar;
-                box_height = MIN_SIZE[s_idx] * 0.5f / ar;
-                FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
-            }
-        }
-
-        if (MAX_SIZE_SIZE > ms_idx) {
-            box_width = box_height = sqrt(MIN_SIZE[ms_idx] * MAX_SIZE[ms_idx]) * 0.5f;
-            FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
-        }
-#endif
-    }
+    // Explicitly check that MIN_SIZE has a value to avoid a seg fault during the OpenCL build
+    if (MIN_SIZE_SIZE > 0) {
+        for (uint ms_idx = 0; ms_idx < MIN_SIZE_SIZE; ++ms_idx) {
+            box_width = MIN_SIZE[ms_idx] * 0.5f;
+            box_height = MIN_SIZE[ms_idx] * 0.5f;
+            FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
+#ifdef MIN_MAX_ASPECT_RATIO_ORDER
+            // Explicitly check that MAX_SIZE has a value to avoid a seg fault during the OpenCL build
+            if ((MAX_SIZE_SIZE > 0) && (MAX_SIZE_SIZE > ms_idx)) {
+                box_width = box_height = sqrt(MIN_SIZE[ms_idx] * MAX_SIZE[ms_idx]) * 0.5f;
+                FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
+            }
+
+            if (SCALE_ALL_SIZES || (!SCALE_ALL_SIZES && (ms_idx == MIN_SIZE_SIZE - 1))) {
+                uint s_idx = SCALE_ALL_SIZES ? ms_idx : 0;
+                for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
+                    OUTPUT_TYPE ar = ASPECT_RATIO[k];
+                    if (fabs(ar - 1.0f) < 1e-6) {
+                        continue;
+                    }
+
+                    ar = sqrt(ar);
+                    box_width = MIN_SIZE[s_idx] * 0.5f * ar;
+                    box_height = MIN_SIZE[s_idx] * 0.5f / ar;
+                    FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
+                }
+            }
+#else
+            if (SCALE_ALL_SIZES || (!SCALE_ALL_SIZES && (ms_idx == MIN_SIZE_SIZE - 1))) {
+                uint s_idx = SCALE_ALL_SIZES ? ms_idx : 0;
+                for (uint k = 0; k < ASPECT_RATIO_SIZE; ++k) {
+                    OUTPUT_TYPE ar = ASPECT_RATIO[k];
+                    if (fabs(ar - 1.0f) < 1e-6) {
+                        continue;
+                    };

+                    ar = sqrt(ar);
+                    box_width = MIN_SIZE[s_idx] * 0.5f * ar;
+                    box_height = MIN_SIZE[s_idx] * 0.5f / ar;
+                    FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
+                }
+            }
+
+            // Explicitly check that MAX_SIZE has a value to avoid a seg fault during the OpenCL build
+            if ((MAX_SIZE_SIZE > 0) && (MAX_SIZE_SIZE > ms_idx)) {
+                box_width = box_height = sqrt(MIN_SIZE[ms_idx] * MAX_SIZE[ms_idx]) * 0.5f;
+                FUNC_CALL(calculate_data)(center_x, center_y, box_width, box_height, false, &out_index, output);
+            }
+#endif
+        }
+    }

#ifdef CLIP
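The hunk above only adds guards; to make the intent easier to see outside the JIT-generated kernel, here is a minimal C++ sketch of the same pattern. It is not the actual OpenCL code: generate_boxes, emit_box and the std::vector arguments are hypothetical stand-ins for the kernel's MIN_SIZE/MAX_SIZE arrays and its calculate_data helper, and the values in main are made up.

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the kernel's calculate_data helper.
static void emit_box(float w, float h) {
    std::printf("box %.2f x %.2f\n", w, h);
}

// min_size / max_size play the role of the JIT-generated MIN_SIZE / MAX_SIZE arrays.
static void generate_boxes(const std::vector<float>& min_size,
                           const std::vector<float>& max_size) {
    // Mirrors the new guard: if (MIN_SIZE_SIZE > 0) { for (ms_idx ...) ... }
    if (min_size.empty())
        return;
    for (std::size_t ms_idx = 0; ms_idx < min_size.size(); ++ms_idx) {
        emit_box(min_size[ms_idx] * 0.5f, min_size[ms_idx] * 0.5f);
        // Mirrors: if ((MAX_SIZE_SIZE > 0) && (MAX_SIZE_SIZE > ms_idx)) { ... }
        if (!max_size.empty() && ms_idx < max_size.size()) {
            const float s = std::sqrt(min_size[ms_idx] * max_size[ms_idx]) * 0.5f;
            emit_box(s, s);
        }
    }
}

int main() {
    generate_boxes({64.0f}, {});        // empty max_size: no min/max box, nothing indexed out of range
    generate_boxes({64.0f}, {300.0f});  // non-empty max_size: adds the sqrt(min * max) box
    return 0;
}

The point of the guard is simply that an empty max_size (MAX_SIZE_SIZE == 0) never leads to indexing the max-size array, which, per the commit message, is what crashed the OpenCL program build.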
43 changes: 31 additions & 12 deletions src/plugins/intel_gpu/src/plugin/ops/prior_box.cpp
@@ -111,18 +111,31 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
    OPENVINO_ASSERT(img_pshape.is_static(), "Dynamic shapes are not supported for PriorBox operation yet");

    if (!output_pshape.is_dynamic()) {
-       auto img_shape = img_pshape.to_shape();
+       const auto output_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(0));
+       const auto image_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
+
+       // output_size has to be a Constant to produce a static output shape
+       OPENVINO_ASSERT(output_size_constant,
+                       "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");

-       auto wdim = img_shape.back();
-       auto hdim = img_shape.at(img_shape.size()-2);
+       const auto output_size = output_size_constant->cast_vector<int64_t>();
+       const auto width = output_size[0];
+       const auto height = output_size[1];
+       const cldnn::tensor output_size_tensor{cldnn::spatial(width, height)};
+
+       cldnn::tensor img_size_tensor{};
+       // When the image size is constant, set it here for primitive construction;
+       // otherwise it is left empty and determined at execute_impl time.
+       if (image_size_constant) {
+           const auto image_size = image_size_constant->cast_vector<int64_t>();
+           const auto image_width = image_size[0];
+           const auto image_height = image_size[1];
+           img_size_tensor = (cldnn::tensor) cldnn::spatial(image_width, image_height);
+       }

-       cldnn::tensor output_size{};
-       cldnn::tensor img_size = (cldnn::tensor) cldnn::spatial(TensorValue(wdim), TensorValue(hdim));
        auto priorBoxPrim = cldnn::prior_box(layerName,
                                             inputs,
-                                            output_size,
-                                            img_size,
+                                            output_size_tensor,
+                                            img_size_tensor,
                                             min_size,
                                             max_size,
                                             aspect_ratio,
@@ -172,18 +185,24 @@ static void CreatePriorBoxOp(Program& p, const std::shared_ptr<ngraph::op::v8::P
    if (!output_pshape.is_dynamic()) {
        const auto output_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(0));
        const auto image_size_constant = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(1));
-       OPENVINO_ASSERT(output_size_constant && image_size_constant,
+
+       // output_size has to be a Constant to produce a static output shape
+       OPENVINO_ASSERT(output_size_constant,
                        "[GPU] Unsupported parameter nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")");

        const auto output_size = output_size_constant->cast_vector<int64_t>();
        const auto width = output_size[0];
        const auto height = output_size[1];
        const cldnn::tensor output_size_tensor{cldnn::spatial(width, height)};

-       const auto image_size = image_size_constant->cast_vector<int64_t>();
-       const auto image_width = image_size[0];
-       const auto image_height = image_size[1];
-       const cldnn::tensor img_size_tensor{cldnn::spatial(image_width, image_height)};
+       cldnn::tensor img_size_tensor{};
+       // When the image size is constant, set it here for primitive construction;
+       // otherwise it is left empty and determined at execute_impl time.
+       if (image_size_constant) {
+           const auto image_size = image_size_constant->cast_vector<int64_t>();
+           const auto image_width = image_size[0];
+           const auto image_height = image_size[1];
+           img_size_tensor = (cldnn::tensor) cldnn::spatial(image_width, image_height);
+       }

        const cldnn::prior_box prior_box{layer_name,
                                         inputs,
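As a rough illustration of the graph-level case the reworked CreatePriorBoxOp paths are meant to accept, the sketch below builds a PriorBox whose output-size input is a Constant, whose image-size input is a runtime Parameter, and whose max_size attribute is empty. It is not code from the repository: the helper name make_prior_box_model, the opset8 spelling of the ops, and the concrete shapes and attribute values are assumptions for the example.

#include <memory>
#include <vector>

#include <openvino/core/model.hpp>
#include <openvino/opsets/opset8.hpp>

// Builds a small model with PriorBox: constant output size, runtime image size,
// empty max_size. Helper name, shapes and values are illustrative only.
std::shared_ptr<ov::Model> make_prior_box_model() {
    using namespace ov::opset8;

    // Output feature-map size [H, W]: a Constant, as required for a static output shape.
    auto output_size = Constant::create(ov::element::i64, ov::Shape{2}, {24, 42});

    // Image size [H, W]: a Parameter, i.e. only known at inference time.
    auto image_size = std::make_shared<Parameter>(ov::element::i64, ov::Shape{2});

    PriorBox::Attributes attrs;
    attrs.min_size = {64.0f};
    attrs.max_size = {};                          // the previously crashing "null max_size" case
    attrs.aspect_ratio = {2.0f};
    attrs.variance = {0.1f, 0.1f, 0.2f, 0.2f};
    attrs.step = 16.0f;
    attrs.offset = 0.5f;

    auto prior_box = std::make_shared<PriorBox>(output_size, image_size, attrs);
    return std::make_shared<ov::Model>(ov::OutputVector{prior_box}, ov::ParameterVector{image_size});
}

Per the assert this commit relaxes, the old v8 path rejected such a model because both inputs had to be Constant; the new path only requires the output-size input to be constant and leaves the image size to be resolved at execution time.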
@@ -28,6 +28,7 @@ typedef std::tuple<
        InputShape,
        InputShape,
        ElementType,         // Net precision
+       std::vector<float>,
        priorbox_type
> PriorBoxLayerGPUTestParamsSet;
class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPUTestParamsSet>,
@@ -37,8 +38,9 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPU
        InputShape input1Shape;
        InputShape input2Shape;
        ElementType netPrecision;
+       std::vector<float> max_size;
        priorbox_type priorboxType;
-       std::tie(input1Shape, input2Shape, netPrecision, priorboxType) = obj.param;
+       std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = obj.param;

        std::ostringstream result;
        switch (priorboxType) {
@@ -67,6 +69,7 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPU
        for (const auto& shape : input2Shape.second) {
            result << ov::test::utils::vec2str(shape) << "_";
        }
+       result << "max_size=" << ov::test::utils::vec2str(max_size) << "_";
        result << ")";
        return result.str();
    }
@@ -77,8 +80,9 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPU
        auto netPrecision = ElementType::undefined;
        InputShape input1Shape;
        InputShape input2Shape;
+       std::vector<float> max_size;
        priorbox_type priorboxType;
-       std::tie(input1Shape, input2Shape, netPrecision, priorboxType) = this->GetParam();
+       std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = this->GetParam();


        init_input_shapes({input1Shape, input2Shape});
@@ -125,7 +129,7 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPU
        ngraph::op::v0::PriorBox::Attributes attributes_v0;

        attributes_v0.min_size = {64};
-       attributes_v0.max_size = {300};
+       attributes_v0.max_size = max_size;
        attributes_v0.aspect_ratio = {2};
        attributes_v0.variance = {0.1, 0.1, 0.2, 0.2};
        attributes_v0.step = 16;
@@ -145,7 +149,7 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface<PriorBoxLayerGPU
        ngraph::op::v8::PriorBox::Attributes attributes_v8;

        attributes_v8.min_size = {64};
-       attributes_v8.max_size = {300};
+       attributes_v8.max_size = max_size;
        attributes_v8.aspect_ratio = {2};
        attributes_v8.variance = {0.1, 0.1, 0.2, 0.2};
        attributes_v8.step = 16;
@@ -203,12 +207,16 @@ std::vector<ov::test::InputShape> imgShapesDynamic = {
    },
};

+std::vector<std::vector<float>> max_size = {
+    {}, {300}
+};
INSTANTIATE_TEST_SUITE_P(smoke_prior_box_full_dynamic,
                         PriorBoxLayerGPUTest,
                         ::testing::Combine(
                             ::testing::ValuesIn(inShapesDynamic),
                             ::testing::ValuesIn(imgShapesDynamic),
                             ::testing::ValuesIn(netPrecisions),
+                            ::testing::ValuesIn(max_size),
                             ::testing::ValuesIn(mode)),
                         PriorBoxLayerGPUTest::getTestCaseName);
} // namespace
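The test changes above thread a new max_size axis through the parameter tuple, the getTestCaseName generator, and the testing::Combine call. The following is a simplified, self-contained GoogleTest analogue of that pattern, not OpenVINO code; MaxSizeAxisTest, ParamsSet and the values are made up for illustration.

#include <gtest/gtest.h>

#include <sstream>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical, simplified analogue of PriorBoxLayerGPUTestParamsSet:
// an int stands in for the precision/shape axes, plus the new max_size axis.
using ParamsSet = std::tuple<int, std::vector<float>>;

class MaxSizeAxisTest : public ::testing::TestWithParam<ParamsSet> {
public:
    static std::string getTestCaseName(const ::testing::TestParamInfo<ParamsSet>& info) {
        std::ostringstream name;
        name << "prec" << std::get<0>(info.param) << "_max_size";
        const auto& max_size = std::get<1>(info.param);
        if (max_size.empty())
            name << "_empty";
        for (float v : max_size)
            name << "_" << v;
        return name.str();
    }
};

TEST_P(MaxSizeAxisTest, UsesMaxSizeAxis) {
    int precision = 0;
    std::vector<float> max_size;
    std::tie(precision, max_size) = GetParam();
    // A real test would build a PriorBox with attrs.max_size = max_size and
    // compare GPU output against a reference; here we only show the plumbing.
    EXPECT_GE(precision, 0);
    EXPECT_TRUE(max_size.empty() || max_size.front() > 0.0f);
}

static const std::vector<std::vector<float>> max_size_values = {{}, {300.0f}};

INSTANTIATE_TEST_SUITE_P(smoke_max_size_axis,
                         MaxSizeAxisTest,
                         ::testing::Combine(::testing::Values(0, 1),
                                            ::testing::ValuesIn(max_size_values)),
                         MaxSizeAxisTest::getTestCaseName);

With the extra ValuesIn axis, every existing combination is instantiated twice, once with an empty max_size and once with {300}, which is how the commit covers the previously crashing configuration.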
