Fixed vcpkg Android ARM64 build with ONNX 1.16.2 (openvinotoolkit#27217)
Replacement for openvinotoolkit#27171

JIRA CVS-155558
ilya-lavrenov authored Oct 24, 2024
1 parent 433e44e commit 79b0bad
Showing 13 changed files with 28 additions and 91 deletions.
conan.lock (2 changes: 1 addition & 1 deletion)
@@ -10,7 +10,7 @@
"opencl-icd-loader/2023.04.17#5f73dd9f0c023d416a7f162e320b9c77%1692732261.088",
"opencl-headers/2023.04.17#3d98f2d12a67c2400de6f11d5335b5a6%1683936272.16",
"opencl-clhpp-headers/2023.04.17#7c62fcc7ac2559d4839150d2ebaac5c8%1685450803.672",
"onnx/1.16.0#4d2d4f24d6f73b8a7551e001839631f0%1712404811.278",
"onnx/1.16.2#b5e8d35b10d454b26751762922465eb8%1712404811.278",
"onetbb/2021.10.0#cbb2fc43088070b48f6e4339bc8fa0e1%1693812561.235",
"ittapi/3.24.0#9246125f13e7686dee2b0c992b71db94%1682969872.743",
"hwloc/2.9.2#1c63e2eccac57048ae226e6c946ebf0e%1688677682.002",
conanfile.txt (2 changes: 1 addition & 1 deletion)
@@ -7,7 +7,7 @@ opencl-icd-loader/[>=2023.04.17]
rapidjson/[>=1.1.0]
xbyak/[>=6.62]
snappy/[>=1.1.7]
-onnx/1.16.0
+onnx/1.16.2
pybind11/[>=2.12.0]
flatbuffers/[>=22.9.24]

src/frontends/onnx/frontend/src/core/tensor.cpp (14 changes: 0 additions & 14 deletions)
@@ -82,18 +82,11 @@ std::vector<int8_t> Tensor::get_data() const {
if (m_tensor_proto->has_raw_data()) {
return detail::__get_raw_data<int8_t>(m_tensor_proto->raw_data(), m_tensor_proto->data_type());
}
-#ifdef ONNX_VERSION_116
if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT8 ||
m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT4) {
return detail::__get_data<int8_t>(m_tensor_proto->int32_data());
}
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT4, INT8, raw data");
-#else
-if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_INT8) {
-return detail::__get_data<int8_t>(m_tensor_proto->int32_data());
-}
-ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "INT8, raw data");
-#endif
}

template <>
@@ -146,18 +139,11 @@ std::vector<uint8_t> Tensor::get_data() const {
if (m_tensor_proto->has_raw_data()) {
return detail::__get_raw_data<uint8_t>(m_tensor_proto->raw_data(), m_tensor_proto->data_type());
}
-#ifdef ONNX_VERSION_116
if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT8 ||
m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT4) {
return detail::__get_data<uint8_t>(m_tensor_proto->int32_data());
}
ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT4, UINT8, raw data");
-#else
-if (m_tensor_proto->data_type() == TensorProto_DataType::TensorProto_DataType_UINT8) {
-return detail::__get_data<uint8_t>(m_tensor_proto->int32_data());
-}
-ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "UINT8, raw data");
-#endif
}

template <>
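Note on the deletions above: ONNX_VERSION_116 was only defined when the build detected ONNX >= 1.16 (see the CMakeLists.txt changes further down); since every supported packaging flow now ships ONNX 1.16.2, the guard is dropped and the INT4/UINT4-aware branch is kept unconditionally. For context, TensorProto stores narrow integer types widened inside the repeated int32_data field, and a helper like detail::__get_data<int8_t> narrows each slot back to the target width. A minimal sketch of that narrowing, written as an illustration under that assumption rather than the actual OpenVINO helper:

```cpp
#include <cstdint>
#include <vector>

// Narrow each 32-bit slot of TensorProto::int32_data() back to T
// (e.g. int8_t for INT8/INT4 tensors, uint8_t for UINT8/UINT4).
// Sub-byte packing, if needed, is assumed to happen downstream when
// the ov::op::v0::Constant with element type i4/u4 is created.
template <typename T, typename Container>
std::vector<T> narrow_int32_data(const Container& int32_data) {
    std::vector<T> result;
    result.reserve(int32_data.size());
    for (const auto value : int32_data) {
        result.push_back(static_cast<T>(value));  // truncating cast
    }
    return result;
}
```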
src/frontends/onnx/frontend/src/core/tensor.hpp (17 changes: 0 additions & 17 deletions)
@@ -65,10 +65,8 @@ class Tensor {
enum class Type {
undefined = TensorProto_DataType::TensorProto_DataType_UNDEFINED,
float32 = TensorProto_DataType::TensorProto_DataType_FLOAT,
-#ifdef ONNX_VERSION_116
uint4 = TensorProto_DataType::TensorProto_DataType_UINT4,
int4 = TensorProto_DataType::TensorProto_DataType_INT4,
-#endif
uint8 = TensorProto_DataType::TensorProto_DataType_UINT8,
int8 = TensorProto_DataType::TensorProto_DataType_INT8,
uint16 = TensorProto_DataType::TensorProto_DataType_UINT16,
@@ -146,10 +144,8 @@
return ov::element::f16;
case TensorProto_DataType::TensorProto_DataType_DOUBLE:
return ov::element::f64;
-#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_INT4:
return ov::element::i4;
-#endif
case TensorProto_DataType::TensorProto_DataType_INT8:
return ov::element::i8;
case TensorProto_DataType::TensorProto_DataType_INT16:
@@ -158,10 +154,8 @@
return ov::element::i32;
case TensorProto_DataType::TensorProto_DataType_INT64:
return ov::element::i64;
-#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_UINT4:
return ov::element::u4;
-#endif
case TensorProto_DataType::TensorProto_DataType_UINT8:
return ov::element::u8;
case TensorProto_DataType::TensorProto_DataType_UINT16:
@@ -205,10 +199,8 @@
return make_ov_constant<ov::float16>(ov::element::f16);
case TensorProto_DataType::TensorProto_DataType_DOUBLE:
return make_ov_constant<double>(ov::element::f64);
-#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_INT4:
return make_ov_constant<int8_t>(ov::element::i4);
-#endif
case TensorProto_DataType::TensorProto_DataType_INT8:
return make_ov_constant<int8_t>(ov::element::i8);
case TensorProto_DataType::TensorProto_DataType_INT16:
@@ -217,10 +209,8 @@
return make_ov_constant<int32_t>(ov::element::i32);
case TensorProto_DataType::TensorProto_DataType_INT64:
return make_ov_constant<int64_t>(ov::element::i64);
-#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_UINT4:
return make_ov_constant<uint8_t>(ov::element::u4);
-#endif
case TensorProto_DataType::TensorProto_DataType_UINT8:
return make_ov_constant<uint8_t>(ov::element::u8);
case TensorProto_DataType::TensorProto_DataType_UINT16:
@@ -238,17 +228,10 @@
case TensorProto_DataType::TensorProto_DataType_STRING:
return make_ov_constant<std::string>(ov::element::string);
default:
-#ifdef ONNX_VERSION_116
ONNX_UNSUPPORTED_DATA_TYPE(
m_tensor_proto->data_type(),
"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, "
"UINT4, UINT8, UINT16, UINT32, UINT64, STRING");
-#else
-ONNX_UNSUPPORTED_DATA_TYPE(
-m_tensor_proto->data_type(),
-"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT8, INT16, INT32, INT64, "
-"UINT8, UINT16, UINT32, UINT64, STRING");
-#endif
}
}

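A note on the 4-bit paths kept above: make_ov_constant<int8_t>(ov::element::i4) stages one logical value per byte and relies on ov::op::v0::Constant to convert and pack them to 4 bits. A minimal usage sketch under that assumption (illustrative only, not code from this PR):

```cpp
#include <openvino/op/constant.hpp>

#include <cstdint>
#include <memory>
#include <vector>

// One logical int4 value per staged byte; the templated vector
// constructor of Constant is assumed to convert/pack the values to
// the 4-bit element type internally.
std::shared_ptr<ov::op::v0::Constant> make_i4_constant() {
    std::vector<int8_t> staged = {-8, -1, 0, 7};  // int4 range is [-8, 7]
    return std::make_shared<ov::op::v0::Constant>(ov::element::i4,
                                                  ov::Shape{staged.size()},
                                                  staged);
}
```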
src/frontends/onnx/frontend/src/frontend.cpp (8 changes: 6 additions & 2 deletions)
@@ -8,7 +8,9 @@
#endif
#include <google/protobuf/port_undef.inc>

-#ifndef OV_PROTOBUF_ABSL_IS_USED
+#ifdef OV_PROTOBUF_ABSL_IS_USED
+# include <absl/log/globals.h>
+#else
# include <google/protobuf/stubs/logging.h>
#endif

@@ -47,7 +49,9 @@ ONNX_FRONTEND_C_API void* get_front_end_data() {
};
#ifndef OPENVINO_DEBUG_ENABLE
// disable protobuf logging
-# ifndef OV_PROTOBUF_ABSL_IS_USED
+# ifdef OV_PROTOBUF_ABSL_IS_USED
+absl::SetGlobalVLogLevel(0);
+# else
google::protobuf::SetLogHandler(nullptr);
# endif
#endif
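A related fix in this hunk: the old #ifndef guard meant that with Abseil-based protobuf (v22+, where google::protobuf::SetLogHandler and the stubs/logging.h header no longer exist), protobuf logging was never silenced at all. The corrected dispatch, extracted into a self-contained sketch (the wrapper function is invented for illustration; both calls are taken from the diff above):

```cpp
// Pick the logging-suppression mechanism that matches how protobuf
// was built. OV_PROTOBUF_ABSL_IS_USED is OpenVINO's own define,
// assumed set when protobuf logs through Abseil.
#ifdef OV_PROTOBUF_ABSL_IS_USED
#    include <absl/log/globals.h>
#else
#    include <google/protobuf/stubs/logging.h>  // legacy protobuf logging
#endif

void disable_protobuf_logging() {
#ifdef OV_PROTOBUF_ABSL_IS_USED
    absl::SetGlobalVLogLevel(0);  // cap verbose (VLOG) output from protobuf
#else
    google::protobuf::SetLogHandler(nullptr);  // drop all protobuf log messages
#endif
}
```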
src/frontends/onnx/frontend/src/utils/common.cpp (10 changes: 0 additions & 10 deletions)
@@ -42,10 +42,8 @@ const ov::element::Type& get_ov_element_type(int64_t onnx_type) {
return ov::element::f16;
case TensorProto_DataType::TensorProto_DataType_FLOAT:
return ov::element::f32;
-#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_INT4:
return ov::element::i4;
-#endif
case TensorProto_DataType::TensorProto_DataType_INT8:
return ov::element::i8;
case TensorProto_DataType::TensorProto_DataType_INT16:
@@ -54,10 +52,8 @@
return ov::element::i32;
case TensorProto_DataType::TensorProto_DataType_INT64:
return ov::element::i64;
-#ifdef ONNX_VERSION_116
case TensorProto_DataType::TensorProto_DataType_UINT4:
return ov::element::u4;
-#endif
case TensorProto_DataType::TensorProto_DataType_UINT8:
return ov::element::u8;
case TensorProto_DataType::TensorProto_DataType_UINT16:
@@ -77,15 +73,9 @@
case TensorProto_DataType::TensorProto_DataType_STRING:
return ov::element::string;
}
-#ifdef ONNX_VERSION_116
ONNX_UNSUPPORTED_DATA_TYPE(onnx_type,
"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, "
"INT32, INT64, UINT4, UINT8, UINT16, UINT32, UINT64, STRING, UNDEFINED");
-#else
-ONNX_UNSUPPORTED_DATA_TYPE(onnx_type,
-"BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT8, INT16, "
-"INT32, INT64, UINT8, UINT16, UINT32, UINT64, STRING, UNDEFINED");
-#endif
}

void default_op_checks(const Node& node, size_t min_inputs_size) {
src/frontends/onnx/onnx_common/CMakeLists.txt (15 changes: 0 additions & 15 deletions)
@@ -35,18 +35,3 @@ ov_link_system_libraries(${TARGET_NAME} PUBLIC onnx_proto onnx)
ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE})

-# Temporary solution until vcpkg doesn't have fresh ONNX,
-# trying determine used version of ONNX to enable modern functionality
-find_package(ONNX 1.16.0 QUIET COMPONENTS onnx onnx_proto NO_MODULE)
-if(ONNX_FOUND)
-target_compile_definitions(${TARGET_NAME} PUBLIC ONNX_VERSION_116)
-else()
-if(EXISTS "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER")
-file(READ "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER" ONNX_VERSION)
-string(STRIP "${ONNX_VERSION}" ONNX_VERSION)
-if((ONNX_VERSION GREATER "1.16.0") OR (ONNX_VERSION EQUAL "1.16.0"))
-target_compile_definitions(${TARGET_NAME} PUBLIC ONNX_VERSION_116)
-endif()
-endif()
-endif()
src/frontends/onnx/onnx_common/src/utils.cpp (8 changes: 0 additions & 8 deletions)
@@ -30,10 +30,8 @@ size_t get_onnx_data_size(int32_t onnx_type) {
return sizeof(ov::float8_e4m3);
case TensorProto_DataType_FLOAT8E5M2:
return sizeof(ov::float8_e5m2);
-#ifdef ONNX_VERSION_116
case TensorProto_DataType_INT4:
return sizeof(int8_t);
-#endif
case TensorProto_DataType_INT8:
return sizeof(int8_t);
case TensorProto_DataType_INT16:
@@ -42,10 +40,8 @@
return sizeof(int32_t);
case TensorProto_DataType_INT64:
return sizeof(int64_t);
-#ifdef ONNX_VERSION_116
case TensorProto_DataType_UINT4:
return sizeof(uint8_t);
-#endif
case TensorProto_DataType_UINT8:
return sizeof(uint8_t);
case TensorProto_DataType_UINT16:
@@ -66,16 +62,12 @@ const std::map<ov::element::Type_t, TensorProto_DataType> OV_2_ONNX_TYPES = {
{ov::element::Type_t::f16, TensorProto_DataType::TensorProto_DataType_FLOAT16},
{ov::element::Type_t::f32, TensorProto_DataType::TensorProto_DataType_FLOAT},
{ov::element::Type_t::f64, TensorProto_DataType::TensorProto_DataType_DOUBLE},
-#ifdef ONNX_VERSION_116
{ov::element::Type_t::i4, TensorProto_DataType::TensorProto_DataType_INT4},
-#endif
{ov::element::Type_t::i8, TensorProto_DataType::TensorProto_DataType_INT8},
{ov::element::Type_t::i16, TensorProto_DataType::TensorProto_DataType_INT16},
{ov::element::Type_t::i32, TensorProto_DataType::TensorProto_DataType_INT32},
{ov::element::Type_t::i64, TensorProto_DataType::TensorProto_DataType_INT64},
-#ifdef ONNX_VERSION_116
{ov::element::Type_t::u4, TensorProto_DataType::TensorProto_DataType_UINT4},
-#endif
{ov::element::Type_t::u8, TensorProto_DataType::TensorProto_DataType_UINT8},
{ov::element::Type_t::u16, TensorProto_DataType::TensorProto_DataType_UINT16},
{ov::element::Type_t::u32, TensorProto_DataType::TensorProto_DataType_UINT32},
src/frontends/onnx/tests/CMakeLists.txt (15 changes: 0 additions & 15 deletions)
@@ -134,21 +134,6 @@ target_compile_definitions(ov_onnx_frontend_tests
set(ONNX_OPSET_VERSION 17 CACHE INTERNAL "Supported version of ONNX operator set")
target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSET_VERSION})

-# Temporary solution until vcpkg doesn't have fresh ONNX,
-# trying determine used version of ONNX to enable modern functionality
-find_package(ONNX 1.16.0 QUIET COMPONENTS onnx onnx_proto NO_MODULE)
-if(ONNX_FOUND)
-target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_VERSION_116)
-else()
-if(EXISTS "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER")
-file(READ "${CMAKE_SOURCE_DIR}/thirdparty/onnx/onnx/VERSION_NUMBER" ONNX_VERSION)
-string(STRIP "${ONNX_VERSION}" ONNX_VERSION)
-if((ONNX_VERSION GREATER "1.16.0") OR (ONNX_VERSION EQUAL "1.16.0"))
-target_compile_definitions(ov_onnx_frontend_tests PRIVATE ONNX_VERSION_116)
-endif()
-endif()
-endif()

if(ONNX_TESTS_DEPENDENCIES)
add_dependencies(ov_onnx_frontend_tests ${ONNX_TESTS_DEPENDENCIES})
endif()
src/frontends/onnx/tests/onnx_import.in.cpp (2 changes: 0 additions & 2 deletions)
@@ -159,7 +159,6 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_bool_init_raw) {
test_case.run();
}

-#ifdef ONNX_VERSION_116
OPENVINO_TEST(${BACKEND_NAME}, onnx_int4_const) {
auto model = convert_model("int4_const.onnx");

@@ -195,7 +194,6 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_uint4_input) {

test_case.run();
}
-#endif

OPENVINO_TEST(${BACKEND_NAME}, onnx_model_add_abc_initializers) {
auto model = convert_model("add_abc_initializers.onnx");
src/frontends/paddle/src/frontend.cpp (8 changes: 6 additions & 2 deletions)
@@ -10,7 +10,9 @@
#endif
#include <google/protobuf/port_undef.inc>

-#ifndef OV_PROTOBUF_ABSL_IS_USED
+#ifdef OV_PROTOBUF_ABSL_IS_USED
+# include <absl/log/globals.h>
+#else
# include <google/protobuf/stubs/logging.h>
#endif

@@ -594,7 +596,9 @@ PADDLE_C_API void* get_front_end_data() {

#ifndef OPENVINO_DEBUG_ENABLE
// disable protobuf logging
-# ifndef OV_PROTOBUF_ABSL_IS_USED
+# ifdef OV_PROTOBUF_ABSL_IS_USED
+absl::SetGlobalVLogLevel(0);
+# else
google::protobuf::SetLogHandler(nullptr);
# endif
#endif
thirdparty/dependencies.cmake (14 changes: 12 additions & 2 deletions)
@@ -335,7 +335,10 @@ if(ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_TF_FRONTEND
# try to find newer version first (major is changed)
# see https://protobuf.dev/support/version-support/ and
# https://github.com/protocolbuffers/protobuf/commit/d61f75ff6db36b4f9c0765f131f8edc2f86310fa
-find_package(Protobuf 4.22.0 QUIET CONFIG)
+find_package(Protobuf 5.26.0 QUIET CONFIG)
+if(NOT Protobuf_FOUND)
+find_package(Protobuf 4.22.0 QUIET CONFIG)
+endif()
if(Protobuf_FOUND)
# protobuf was found via CONFIG mode, let's save it for later usage in OpenVINOConfig.cmake static build
set(protobuf_config CONFIG)
@@ -500,10 +503,17 @@ endif()
#

if(ENABLE_OV_ONNX_FRONTEND)
-find_package(ONNX 1.15.0 QUIET COMPONENTS onnx onnx_proto NO_MODULE)
+find_package(ONNX 1.16.2 QUIET COMPONENTS onnx onnx_proto NO_MODULE)

if(ONNX_FOUND)
# conan and vcpkg create imported targets 'onnx' and 'onnx_proto'
+# newer versions of ONNX in vcpkg has ONNX:: prefix, let's create aliases
+if(TARGET ONNX::onnx)
+add_library(onnx ALIAS ONNX::onnx)
+endif()
+if(TARGET ONNX::onnx_proto)
+add_library(onnx_proto ALIAS ONNX::onnx_proto)
+endif()
else()
add_subdirectory(thirdparty/onnx)
endif()
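A note on the alias block above: newer vcpkg ONNX ports export namespaced imported targets (ONNX::onnx, ONNX::onnx_proto), while the rest of the OpenVINO build links against plain onnx / onnx_proto, which is what conan and the vendored thirdparty/onnx provide. The conditional add_library(... ALIAS ...) bridges the two namings without touching any consumer; note that aliasing a non-global imported target this way requires CMake 3.18 or newer.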
vcpkg.json (4 changes: 2 additions & 2 deletions)
@@ -1,7 +1,7 @@
{
"$schema": "https://raw.githubusercontent.com/microsoft/vcpkg-tool/main/docs/vcpkg.schema.json",
"name": "openvino",
"version": "2024.0.0",
"version": "2024.5.0",
"maintainers": "OpenVINO Developers <[email protected]>",
"summary": "This is a port for Open Visual Inference And Optimization toolkit for AI inference",
"description": [
@@ -14,7 +14,7 @@
"homepage": "https://github.com/openvinotoolkit/openvino",
"documentation": "https://docs.openvino.ai/latest/index.html",
"license": "Apache-2.0",
"builtin-baseline": "7ba0ba7334c3346e7eee1e049ba85da193a8d821",
"builtin-baseline": "88a0bf87b5efd6270502dfe4dde75dd155bd992b",
"dependencies": [
{
"name": "pkgconf",
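For context: builtin-baseline pins the vcpkg registry commit from which default dependency versions are resolved, so moving it from 7ba0ba7... to 88a0bf8... is what actually makes an ONNX port new enough for 1.16.2 visible to this manifest, and the version bump to 2024.5.0 aligns the port with the OpenVINO release being packaged.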
