Merged

Commits (21 total; the diff below shows changes from 18 commits)
b218169  Disable deprecated features in cpp20 as errors (praasz, Feb 12, 2024)
9a8563c  Build compatibility for cpp 11 and 20 (praasz, Feb 12, 2024)
33e8a0d  Use cpp20 in CC builds (praasz, Feb 12, 2024)
a693bce  Fix c++20 build issues on GPU (praasz, Feb 13, 2024)
0a3d0f6  Fix cpp20 build issues on windows (use private) (praasz, Feb 13, 2024)
0515827  Move rapidjson iterator operators and build it only for c++20 (praasz, Feb 13, 2024)
4480f8b  Use invoke_result for cpp20 instead result_of (praasz, Feb 13, 2024)
019867d  Fix cpp20 detection because of MSVC bug (praasz, Feb 14, 2024)
3dfb834  Cpp20 MSVC fix add lambda return type to avoid deduction issues (praasz, Feb 14, 2024)
ee77aba  Cpp20 MSVC fix declare specialization (praasz, Feb 14, 2024)
5250e18  Update gtest to fix build issues with cpp20 (praasz, Feb 14, 2024)
1fdc7d1  Don't use wstring for logger and exception message (praasz, Feb 14, 2024)
ec91b9c  Remove not required c-style cast (praasz, Feb 14, 2024)
ac33f64  Remove ov_deprecated_featurs_no_errors macro (praasz, Feb 15, 2024)
0ebb775  Merge remote-tracking branch 'origin/master' into feature/support-ov-… (praasz, Feb 15, 2024)
bd85f46  Fix condition for Wno-error=deprecated (praasz, Feb 15, 2024)
4d2a920  Add -Wno-error=deprecated to Clang (praasz, Feb 15, 2024)
0ea0e3c  Merge branch 'master' into feature/support-ov-compilatoin-with-cpp20 (praasz, Feb 16, 2024)
8b59864  Remove warnings for `abs` use in inverse (praasz, Feb 16, 2024)
9ad6b3a  Update gtest submodule (praasz, Feb 16, 2024)
a553132  Merge branch 'master' into feature/support-ov-compilatoin-with-cpp20 (ilya-lavrenov, Feb 17, 2024)
1 change: 1 addition & 0 deletions .github/workflows/linux_conditional_compilation.yml
@@ -152,6 +152,7 @@ jobs:
       run: |
         cmake \
           -G "${{ env.CMAKE_GENERATOR }}" \
+          -DCMAKE_CXX_STANDARD=20 \
           -DBUILD_SHARED_LIBS=OFF \
           -DENABLE_TESTS=ON \
           -DENABLE_CPPLINT=OFF \
1 change: 1 addition & 0 deletions .github/workflows/windows_conditional_compilation.yml
@@ -147,6 +147,7 @@ jobs:
       run: |
         cmake -G "${{ env.CMAKE_GENERATOR }}" `
           -DBUILD_SHARED_LIBS=OFF `
+          -DCMAKE_CXX_STANDARD=20 `
           -DENABLE_TESTS=ON `
           -DENABLE_CPPLINT=OFF `
           -DENABLE_NCC_STYLE=OFF `
4 changes: 4 additions & 0 deletions docs/snippets/CMakeLists.txt
@@ -15,6 +15,10 @@ if(UNUSED_BUT_SET_VARIABLE_SUPPORTED)
     ov_add_compiler_flags(-Wno-unused-but-set-variable)
 endif()

+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 file(GLOB SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp"
                   "${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp"
                   "${CMAKE_CURRENT_SOURCE_DIR}/src/*.c")
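Note on the `-Wno-error=deprecated` guard added here (and repeated for several targets below): C++20 deprecates some long-standing idioms, so GCC and Clang emit `-Wdeprecated` diagnostics that `-Werror` would promote to build failures. A minimal sketch (not from this PR) of code that trips one such diagnostic:

struct Counter {
    int value = 0;
    void bump() { ++value; }
    auto make_bumper() {
        // warning: implicit capture of 'this' via '[=]' is deprecated in C++20 (P0806)
        return [=]() { bump(); };
    }
};

int main() {
    Counter c;
    c.make_bumper()();
    return c.value == 1 ? 0 : 1;
}

The matcher_pass.cpp change further down fixes one such site by listing its captures explicitly; the docs snippets keep the old style, so the warning is merely downgraded from an error for them.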
3 changes: 2 additions & 1 deletion samples/cpp/hello_classification/main.cpp
@@ -28,7 +28,8 @@ int tmain(int argc, tchar* argv[]) {

     // -------- Parsing and validation of input arguments --------
     if (argc != 4) {
-        slog::info << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device_name>" << slog::endl;
+        slog::info << "Usage : " << TSTRING2STRING(argv[0]) << " <path_to_model> <path_to_image> <device_name>"
+                   << slog::endl;
         return EXIT_FAILURE;
     }

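The `TSTRING2STRING` conversion is needed because on UNICODE Windows builds `tchar` is `wchar_t`, and C++20 (P1423R3) deletes the `wchar_t*` inserters on narrow streams, so streaming `argv[0]` into the narrow log stream stops compiling. The same rule explains the `L"..."` to `"..."` message changes in variables_index.cpp below. A small sketch, with `to_narrow` as a hypothetical stand-in for `TSTRING2STRING`:

#include <iostream>
#include <string>

// Hypothetical helper (not the sample's actual implementation): narrows a
// wide string element-by-element; assumes ASCII-only content.
static std::string to_narrow(const std::wstring& w) {
    return std::string(w.begin(), w.end());
}

int main() {
    const wchar_t* arg0 = L"hello_classification";
    // std::cout << arg0;  // C++20: ill-formed, the const wchar_t* inserter
    //                     // for narrow streams is deleted (P1423R3)
    std::cout << to_narrow(arg0) << '\n';  // convert to narrow text first
    return 0;
}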
3 changes: 3 additions & 0 deletions src/common/low_precision_transformations/CMakeLists.txt
@@ -16,6 +16,9 @@ source_group("src" FILES ${LIBRARY_SRC})
 source_group("include" FILES ${PUBLIC_HEADERS})

 # Create library
+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 add_library(${TARGET_NAME}_obj OBJECT
     ${LIBRARY_SRC}
3 changes: 3 additions & 0 deletions src/common/snippets/CMakeLists.txt
@@ -16,6 +16,9 @@ source_group("src" FILES ${LIBRARY_SRC})
 source_group("include" FILES ${PUBLIC_HEADERS})

 # Create static library
+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 add_library(${TARGET_NAME} STATIC
     ${LIBRARY_SRC}
3 changes: 3 additions & 0 deletions src/common/transformations/CMakeLists.txt
@@ -16,6 +16,9 @@ source_group("src" FILES ${LIBRARY_SRC})
 source_group("include" FILES ${PUBLIC_HEADERS})

 # Create library
+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 add_library(${TARGET_NAME}_obj OBJECT ${LIBRARY_SRC} ${PUBLIC_HEADERS})
 target_compile_definitions(${TARGET_NAME}_obj PRIVATE IMPLEMENT_OPENVINO_API)
@@ -21,7 +21,7 @@ TRANSFORMATIONS_API bool has_nms_selected_indices(const Node* node);

 TRANSFORMATIONS_API void set_nms_selected_indices(Node* node);

-class TRANSFORMATIONS_API NmsSelectedIndices : ov::RuntimeAttribute {
+class TRANSFORMATIONS_API NmsSelectedIndices : public ov::RuntimeAttribute {
 public:
     OPENVINO_RTTI("nms_selected_indices", "0");
     NmsSelectedIndices() = default;
2 changes: 1 addition & 1 deletion src/core/tests/matcher_pass.cpp
@@ -25,7 +25,7 @@ class TestMatcherPass : public ov::pass::MatcherPass {
         auto m_relu1 = ov::pass::pattern::wrap_type<ov::op::v0::Relu>(pattern::consumers_count(1));
         auto m_relu2 = ov::pass::pattern::wrap_type<ov::op::v0::Relu>({m_relu1});

-        ov::graph_rewrite_callback callback = [=](pattern::Matcher& m) {
+        ov::graph_rewrite_callback callback = [m_relu1, this](pattern::Matcher& m) {
             // Map that helps to connect labels with matched outputs
             auto& node_to_output = m.get_pattern_value_map();

4 changes: 4 additions & 0 deletions src/frontends/paddle/src/CMakeLists.txt
@@ -2,6 +2,10 @@
 # SPDX-License-Identifier: Apache-2.0
 #

+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 ov_add_frontend(NAME paddle
                 LINKABLE_FRONTEND
                 PROTOBUF_REQUIRED
4 changes: 4 additions & 0 deletions src/frontends/pytorch/src/CMakeLists.txt
@@ -2,6 +2,10 @@
 # SPDX-License-Identifier: Apache-2.0
 #

+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 ov_add_frontend(NAME pytorch
                 LINKABLE_FRONTEND
                 SHUTDOWN_PROTOBUF
4 changes: 2 additions & 2 deletions src/frontends/tensorflow/src/variables_index.cpp
@@ -228,11 +228,11 @@ bool VariablesIndex::read_variables(std::ifstream& vi_stream, const std::wstring
     }
     if (m_mmap_enabled) {
         m_data_files[shard].mmap = load_mmap_object(fullPath);
-        FRONT_END_GENERAL_CHECK(m_data_files[shard].mmap->data(), L"Variable index data cannot be mapped");
+        FRONT_END_GENERAL_CHECK(m_data_files[shard].mmap->data(), "Variable index data cannot be mapped");
     } else {
         m_data_files[shard].stream = std::shared_ptr<std::ifstream>(
             new std::ifstream(fullPath.c_str(), std::ifstream::in | std::ifstream::binary));
-        FRONT_END_GENERAL_CHECK(m_data_files[shard].stream->is_open(), L"Variable index data file does not exist");
+        FRONT_END_GENERAL_CHECK(m_data_files[shard].stream->is_open(), "Variable index data file does not exist");
     }
 }

6 changes: 2 additions & 4 deletions src/inference/tests/functional/caching_test.cpp
@@ -2359,9 +2359,7 @@ TEST_P(CachingTest, LoadBATCHWithConfig) {
     EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber());
     EXPECT_CALL(*mockPlugin, get_property(ov::hint::performance_mode.name(), _))
         .Times(AnyNumber())
-        .WillRepeatedly(Return([] {
-            return ov::hint::PerformanceMode::THROUGHPUT;
-        }));
+        .WillRepeatedly(Return(ov::hint::PerformanceMode::THROUGHPUT));
     if (m_remoteContext) {
         return;  // skip the remote Context test for Auto plugin
     }
@@ -2490,4 +2488,4 @@ INSTANTIATE_TEST_SUITE_P(CacheTestWithProxyEnabled,
                          CacheTestWithProxyEnabled,
                          ::testing::Combine(::testing::ValuesIn(loadVariants), ::testing::ValuesIn(cacheFolders)),
                          getTestCaseName);
-#endif
\ No newline at end of file
+#endif
2 changes: 1 addition & 1 deletion src/plugins/auto_batch/src/sync_infer_request.cpp
@@ -160,4 +160,4 @@ std::vector<ov::ProfilingInfo> SyncInferRequest::get_profiling_info() const {
     return m_batched_request_wrapper->_infer_request_batched->get_profiling_info();
 }
 }  // namespace autobatch_plugin
-}  // namespace ov
\ No newline at end of file
+}  // namespace ov
6 changes: 5 additions & 1 deletion src/plugins/intel_cpu/CMakeLists.txt
@@ -8,6 +8,10 @@ endif()

 set(TARGET_NAME "openvino_intel_cpu_plugin")

+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
     # C4267, 4244 issues from oneDNN headers conversion from 'XXX' to 'YYY', possible loss of data
     ov_add_compiler_flags(/wd4018)
@@ -205,7 +209,7 @@ if(BUILD_SHARED_LIBS)
         $<TARGET_PROPERTY:openvino::conditional_compilation,INTERFACE_INCLUDE_DIRECTORIES>)

     target_include_directories(${TARGET_NAME}_obj SYSTEM PUBLIC $<TARGET_PROPERTY:dnnl,INCLUDE_DIRECTORIES>)
-
+
     if(ENABLE_MLAS_FOR_CPU)
         target_include_directories(${TARGET_NAME}_obj SYSTEM PUBLIC $<TARGET_PROPERTY:mlas,INCLUDE_DIRECTORIES>)
     endif()
12 changes: 8 additions & 4 deletions src/plugins/intel_cpu/src/cache/multi_cache.h
@@ -41,10 +41,14 @@ class MultiCache {
      * Also the builder type is used for the ValueType deduction
      * @return result of the operation which is a pair of the requested object of ValType and the status of whether the cache hit or miss occurred
      */
-
-    template<typename KeyType, typename BuilderType, typename ValueType = typename std::result_of<BuilderType&(const KeyType&)>::type>
-    typename CacheEntry<KeyType, ValueType>::ResultType
-    getOrCreate(const KeyType& key, BuilderType builder) {
+    template <typename KeyType,
+              typename BuilderType,
+#if (defined(_MSVC_LANG) && (_MSVC_LANG > 201703L)) || (defined(__cplusplus) && (__cplusplus > 201703L))
+              typename ValueType = std::invoke_result_t<BuilderType&, const KeyType&>>
+#else
+              typename ValueType = typename std::result_of<BuilderType&(const KeyType&)>::type>
+#endif
+    typename CacheEntry<KeyType, ValueType>::ResultType getOrCreate(const KeyType& key, BuilderType builder) {
         auto entry = getEntry<KeyType, ValueType>();
         return entry->getOrCreate(key, std::move(builder));
     }
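Background for this hunk: `std::result_of` was deprecated in C++17 and removed in C++20, so the deduction falls back to `std::invoke_result_t` (available since C++17) when the language mode is newer than C++17. `_MSVC_LANG` is tested first because MSVC keeps `__cplusplus` at 199711L unless `/Zc:__cplusplus` is passed, which is what the "Fix cpp20 detection because of MSVC bug" commit refers to. A minimal sketch of the same dual-path deduction:

#include <type_traits>

template <typename Builder, typename Key>
#if (defined(_MSVC_LANG) && (_MSVC_LANG > 201703L)) || (defined(__cplusplus) && (__cplusplus > 201703L))
using build_result_t = std::invoke_result_t<Builder&, const Key&>;  // the only option in C++20
#else
using build_result_t = typename std::result_of<Builder&(const Key&)>::type;  // removed in C++20
#endif

static_assert(std::is_same<build_result_t<int (*)(const long&), long>, int>::value,
              "a builder taking const long& and returning int yields int");

int main() {
    return 0;
}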
23 changes: 17 additions & 6 deletions src/plugins/intel_cpu/src/graph.cpp
@@ -1093,6 +1093,17 @@ class UpdateNodesSeq : public IUpdateNodes {
 #endif

 #if (OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO || OV_THREAD == OV_THREAD_OMP)
+
+#    if (defined(_MSVC_LANG) && (_MSVC_LANG > 201703L)) || (defined(__cplusplus) && (__cplusplus > 201703L))
+#        define ov_memory_order_release std::memory_order_release
+#        define ov_memory_order_relaxed std::memory_order_relaxed
+#        define ov_memory_order_acquire std::memory_order_acquire
+#    else
+#        define ov_memory_order_release std::memory_order::memory_order_release
+#        define ov_memory_order_relaxed std::memory_order::memory_order_relaxed
+#        define ov_memory_order_acquire std::memory_order::memory_order_acquire
+#    endif
+
 class UpdateNodesBase : public IUpdateNodes {
 public:
     explicit UpdateNodesBase(std::vector<NodePtr>& executableGraphNodes) : m_executableGraphNodes(executableGraphNodes) {}
@@ -1103,22 +1114,22 @@ class UpdateNodesBase : public IUpdateNodes {
                 if (node->isDynamicNode()) {
                     node->updateShapes();
                 }
-                m_prepareCounter.store(i, std::memory_order::memory_order_release);
+                m_prepareCounter.store(i, ov_memory_order_release);
             }
         }
         catch(...) {
-            m_completion.store(true, std::memory_order::memory_order_relaxed);
+            m_completion.store(true, ov_memory_order_relaxed);
            throw;
         }
-        m_prepareCounter.store(stop_indx, std::memory_order::memory_order_relaxed);
-        m_completion.store(true, std::memory_order::memory_order_release);
+        m_prepareCounter.store(stop_indx, ov_memory_order_relaxed);
+        m_completion.store(true, ov_memory_order_release);
     }

     void updateDynParams(size_t node_indx, size_t /*unused*/) {
         size_t local_counter = node_indx;
         while (true) {
-            const bool completion = m_completion.load(std::memory_order::memory_order_acquire);
-            const size_t prepareCounter = m_prepareCounter.load(std::memory_order::memory_order_relaxed);
+            const bool completion = m_completion.load(ov_memory_order_acquire);
+            const size_t prepareCounter = m_prepareCounter.load(ov_memory_order_relaxed);
             if (completion && local_counter == prepareCounter) {
                 break;
             }
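Why the macros: P0439 made `std::memory_order` a scoped enumeration in C++20, renaming the enumerators to the short forms (`release`, `relaxed`, ...) and keeping only the `std::memory_order_release`-style compatibility constants at namespace scope. The doubly-qualified `std::memory_order::memory_order_release` spelling was valid for the unscoped C++11 enum but no longer names anything in C++20, so `ov_memory_order_*` expands to a spelling valid in each mode. Illustration:

#include <atomic>

int main() {
    std::atomic<bool> done{false};
    done.store(true, std::memory_order_release);  // valid from C++11 through C++20 and later
    // done.store(true, std::memory_order::memory_order_release);  // C++17: OK (unscoped enum)
    //                                                              // C++20: error (enum class, renamed members)
    // done.store(true, std::memory_order::release);                // C++20-only spelling
    return done.load(std::memory_order_acquire) ? 0 : 1;
}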
@@ -244,7 +244,10 @@ const std::vector<ExecutorImplementation<FCAttrs>>& getImplementations() {
                 return true;
             },
             // create
-            [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, ExecutorContext::CPtr context) {
+            [](const FCAttrs& attrs,
+               const PostOps& postOps,
+               const MemoryArgs& memory,
+               ExecutorContext::CPtr context) -> std::shared_ptr<Executor> {
                 struct ConvolutionInstantiator {
                     std::shared_ptr<DnnlConvolutionPrimitive> operator()(
                         const MemoryArgs& memory,
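This is the "add lambda return type to avoid deduction issues" commit: naming `std::shared_ptr<Executor>` explicitly spares the compiler from having to deduce a common return type. A generic illustration of the technique (the type names here are hypothetical, not the plugin's real ones):

#include <memory>

struct Executor { virtual ~Executor() = default; };
struct DnnlExecutor : Executor {};
struct RefExecutor : Executor {};

int main() {
    bool use_dnnl = true;
    // Without a trailing return type, the two return statements would deduce
    // different types (shared_ptr<DnnlExecutor> vs shared_ptr<RefExecutor>)
    // and deduction fails; naming the common base lets both returns convert.
    auto create = [use_dnnl]() -> std::shared_ptr<Executor> {
        if (use_dnnl) {
            return std::make_shared<DnnlExecutor>();
        }
        return std::make_shared<RefExecutor>();
    };
    return create() ? 0 : 1;
}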
3 changes: 3 additions & 0 deletions src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.hpp
@@ -700,6 +700,9 @@ struct jit_kernel : public dnnl::impl::cpu::x64::jit_generator {
     std::unordered_map<size_t, std::unique_ptr<jit_emitter>> _emitters;
 };

+template <>
+const Xbyak::Reg64& jit_kernel::reserve<Xbyak::Reg64>();
+
 template<typename T>
 void jit_kernel::copy(const Xbyak::Reg64& dst,
                       const Xbyak::Reg64& src,
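The added lines declare the explicit specialization in the header. If a translation unit uses `reserve<Xbyak::Reg64>()` before the specialization has been declared, the primary template is implicitly instantiated and the program is ill-formed, no diagnostic required; the "Cpp20 MSVC fix declare specialization" commit suggests MSVC in C++20 mode started diagnosing this. A sketch of the pattern under that assumption:

#include <iostream>

struct kernel {
    template <typename T>
    const T& reserve();  // primary template; specializations defined in a .cpp
};

// Declare the explicit specialization before any use, so callers never
// instantiate the primary template for this type.
template <>
const int& kernel::reserve<int>();

// In the real code this definition would live in the .cpp file:
template <>
const int& kernel::reserve<int>() {
    static const int slot = 42;
    return slot;
}

int main() {
    kernel k;
    std::cout << k.reserve<int>() << '\n';
    return 0;
}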
4 changes: 4 additions & 0 deletions src/plugins/intel_gpu/CMakeLists.txt
@@ -8,6 +8,10 @@ endif()

 set (TARGET_NAME "openvino_intel_gpu_plugin")

+if((CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG) AND CMAKE_CXX_STANDARD GREATER_EQUAL 20)
+    set(CMAKE_CXX_FLAGS "-Wno-error=deprecated ${CMAKE_CXX_FLAGS}")
+endif()
+
 if(CMAKE_COMPILER_IS_GNUCXX)
     ov_add_compiler_flags(-Wno-strict-aliasing)
 endif()
@@ -689,16 +689,16 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf)
     }

     GPU_DEBUG_IF(debug_config->verbose >= 2) {
-        reorder_cnt total_reorder_count = std::accumulate(
-            p.get_processing_order().begin(),
-            p.get_processing_order().end(),
-            reorder_cnt{ 0, 0 },
-            [&](reorder_cnt& total, program_node* node) {
-                if (fmt_map.count(node) == 0 || fmt_map.at(node) == format::any)
-                    return total;
-                auto count = count_reorders(fmt_map, lo, node);
-                return reorder_cnt{ total.number + count.number, total.total_sizes + count.total_sizes };
-            });
+        reorder_cnt total_reorder_count =
+            std::accumulate(p.get_processing_order().begin(),
+                            p.get_processing_order().end(),
+                            reorder_cnt{0, 0},
+                            [&](reorder_cnt total, program_node* node) {
+                                if (fmt_map.count(node) == 0 || fmt_map.at(node) == format::any)
+                                    return total;
+                                auto count = count_reorders(fmt_map, lo, node);
+                                return reorder_cnt{total.number + count.number, total.total_sizes + count.total_sizes};
+                            });
         // Divide results by two as above function will each reorder from both sides
         GPU_DEBUG_LOG_PASS << "Total number of reorders: " << total_reorder_count.number / 2 << std::endl;
         GPU_DEBUG_LOG_PASS << "Total elements count of all reorders: " << total_reorder_count.total_sizes / 2 << std::endl;
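The substantive change here is the lambda's first parameter: `reorder_cnt&` becomes `reorder_cnt`. Since C++20 (P0616R0), `std::accumulate` is specified as `init = op(std::move(init), *it)`, so the accumulator arrives as an rvalue and no longer binds to a non-const lvalue reference. The same reasoning applies to the `std::string&` to `std::string` change in file_utils.cpp further down. A small sketch:

#include <iostream>
#include <numeric>
#include <vector>

struct Count { int n; };

int main() {
    std::vector<int> xs{1, 2, 3};

    // C++20 accumulate does `init = op(std::move(init), *it)`, so a
    // `Count&` parameter no longer binds to the moved-from rvalue:
    // auto bad = [](Count& acc, int x) { return Count{acc.n + x}; };  // C++20: error

    auto good = [](Count acc, int x) { return Count{acc.n + x}; };     // take it by value
    Count total = std::accumulate(xs.begin(), xs.end(), Count{0}, good);
    std::cout << total.n << '\n';  // prints 6
    return 0;
}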
21 changes: 21 additions & 0 deletions src/plugins/intel_gpu/src/kernel_selector/auto_tuner.cpp
@@ -36,6 +36,27 @@
 #include <dlfcn.h>
 #endif

+#if __cplusplus > 201703L
+
+// Add operators `==` and `!=` for rapidjson::GenericMemberIterator for the non-const
+// iterator when building with C++20, which is more strict regarding type checks.
+namespace rapidjson {
+
+template <typename Encoding, typename Allocator>
+inline bool operator==(GenericMemberIterator<false, Encoding, Allocator> lhs,
+                       GenericMemberIterator<false, Encoding, Allocator> rhs) {
+    return static_cast<GenericMemberIterator<true, Encoding, Allocator>>(lhs) ==
+           static_cast<GenericMemberIterator<true, Encoding, Allocator>>(rhs);
+}
+
+template <typename Encoding, typename Allocator>
+inline bool operator!=(GenericMemberIterator<false, Encoding, Allocator> lhs,
+                       GenericMemberIterator<false, Encoding, Allocator> rhs) {
+    return !(lhs == rhs);
+}
+}  // namespace rapidjson
+#endif
+
 namespace kernel_selector {

 class TuningCache::Impl {
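The likely mechanism, sketched with simplified stand-in types rather than rapidjson's real classes: rapidjson declares iterator comparison against the const flavour only, and C++20 also considers the reversed candidate of every `operator==`. For a non-const/non-const comparison, the normal and reversed candidates each need one implicit conversion, so neither wins and the call is ambiguous; an exact-match overload for the non-const pair, like the one added above, resolves it.

struct ConstIter {
    const int* p;
    bool operator==(ConstIter rhs) const { return p == rhs.p; }
};

struct MutIter {
    int* p;
    operator ConstIter() const { return ConstIter{p}; }
    // rapidjson-style: comparison declared against the const flavour only.
    bool operator==(ConstIter rhs) const { return p == rhs.p; }
};

// Exact-match overload for the mutable pair. Without it, C++20 weighs
// lhs.operator==(ConstIter(rhs)) against the reversed rhs.operator==(ConstIter(lhs));
// both need one user conversion, so `a == b` is ambiguous.
inline bool operator==(MutIter lhs, MutIter rhs) {
    return static_cast<ConstIter>(lhs) == static_cast<ConstIter>(rhs);
}

int main() {
    int x = 0;
    MutIter a{&x}, b{&x};
    return (a == b) ? 0 : 1;
}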
@@ -612,10 +612,8 @@ std::string toString_v2(const DataTensor& tensor) {
     std::stringstream s;
     s << toString(tensor.GetDType()) << "_";
     s << toString(tensor.GetLayout());
-    int i = 0;
     for (auto dim : tensor.GetDims()) {
         s << "_v" << dim.v << "_p" << dim.pad.before << "_" << dim.pad.after;
-        i++;
     }
     return s.str();
 }
@@ -11,6 +11,13 @@
 #include <intel_gpu/primitives/gather.hpp>
 #include <intel_gpu/primitives/permute.hpp>

+namespace cldnn {
+// For gtest NE comparisons; the class defines only the `==` operator.
+// Required when building with C++20.
+inline bool operator!=(const range& lhs, const fully_connected& rhs) {
+    return !(lhs.operator==(rhs));
+}
+}  // namespace cldnn
+
 using namespace cldnn;
 using namespace ::tests;
4 changes: 2 additions & 2 deletions src/tests/test_utils/common_test_utils/src/file_utils.cpp
@@ -192,7 +192,7 @@ std::string getRelativePath(const std::string& from, const std::string& to) {
         output += std::accumulate(mismatch_it.first,
                                   from_vec.end(),
                                   std::string{},
-                                  [&separator](std::string& a, const std::string&) -> std::string {
+                                  [&separator](std::string a, const std::string&) -> std::string {
                                       return a += ".." + separator;
                                   });
     }
@@ -203,7 +203,7 @@ std::string getRelativePath(const std::string& from, const std::string& to) {
     output += std::accumulate(mismatch_it.second,
                               to_vec.end(),
                               std::string{},
-                              [&separator](std::string& a, const std::string& b) -> std::string {
+                              [&separator](std::string a, const std::string& b) -> std::string {
                                   return a.empty() ? a += b : a += separator + b;
                               });
     return output;
2 changes: 1 addition & 1 deletion thirdparty/gtest/gtest
10 changes: 5 additions & 5 deletions thirdparty/itt_collector/sea_itt_lib/sea_itt_lib.cpp
@@ -327,14 +327,14 @@ SEA_EXPORT int NotifyEvent(iJIT_JVM_EVENT event_type, void* EventSpecificData) {

     switch (event_type) {
     case iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED: {
-        sea::WriteJit(&(uint32_t)methodData->method_id, sizeof(uint32_t));
+        sea::WriteJit(&methodData->method_id, sizeof(uint32_t));
         sea::WriteJit(&methodData->method_load_address, sizeof(void*));
-        sea::WriteJit(&(uint32_t)methodData->method_size, sizeof(uint32_t));
-        sea::WriteJit(&(uint32_t)methodData->line_number_size, sizeof(uint32_t));
+        sea::WriteJit(&methodData->method_size, sizeof(uint32_t));
+        sea::WriteJit(&methodData->line_number_size, sizeof(uint32_t));
         for (unsigned int i = 0; i < methodData->line_number_size; ++i) {
             const LineNumberInfo& lni = methodData->line_number_table[i];
-            sea::WriteJit(&(uint32_t)lni.Offset, sizeof(uint32_t));
-            sea::WriteJit(&(uint32_t)lni.LineNumber, sizeof(uint32_t));
+            sea::WriteJit(&lni.Offset, sizeof(uint32_t));
+            sea::WriteJit(&lni.LineNumber, sizeof(uint32_t));
         }

         const char* strings[] = {methodData->method_name, methodData->class_file_name, methodData->source_file_name};
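This is the "Remove not required c-style cast" commit: a C-style cast yields a prvalue, and unary `&` requires an lvalue, so `&(uint32_t)expr` is ill-formed in standard C++ even though it evidently compiled with the toolchains used before. Since the fields are already 32-bit unsigned, dropping the cast is enough. A small sketch of the before/after:

#include <cstddef>
#include <cstdint>
#include <cstdio>

static void write_bytes(const void* src, std::size_t n) {
    const unsigned char* b = static_cast<const unsigned char*>(src);
    for (std::size_t i = 0; i < n; ++i) {
        std::printf("%02x", b[i]);
    }
    std::printf("\n");
}

int main() {
    std::uint32_t method_id = 7;
    // Original style: write_bytes(&(uint32_t)method_id, sizeof(uint32_t));
    // The cast produces a prvalue, so taking its address is ill-formed.
    write_bytes(&method_id, sizeof(std::uint32_t));  // the cast was redundant anyway
    return 0;
}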