diff --git a/model_api/cpp/adapters/include/adapters/inference_adapter.h b/model_api/cpp/adapters/include/adapters/inference_adapter.h
index c4352b75..a32e1fac 100644
--- a/model_api/cpp/adapters/include/adapters/inference_adapter.h
+++ b/model_api/cpp/adapters/include/adapters/inference_adapter.h
@@ -38,6 +38,7 @@ class InferenceAdapter
     virtual ~InferenceAdapter() = default;
 
     virtual InferenceOutput infer(const InferenceInput& input) = 0;
+    virtual void infer(const InferenceInput& input, InferenceOutput& output) = 0;
     virtual void setCallback(std::function<void(ov::InferRequest, CallbackData)> callback) = 0;
     virtual void inferAsync(const InferenceInput& input, CallbackData callback_args) = 0;
     virtual bool isReady() = 0;
@@ -48,6 +49,9 @@ class InferenceAdapter
                            const std::string& device = "", const ov::AnyMap& compilationConfig = {},
                            size_t max_num_requests = 0) = 0;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const = 0;
+    virtual ov::PartialShape getOutputShape(const std::string& outputName) const = 0;
+    virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const = 0;
+    virtual ov::element::Type_t getOutputDatatype(const std::string& outputName) const = 0;
     virtual std::vector<std::string> getInputNames() const = 0;
     virtual std::vector<std::string> getOutputNames() const = 0;
     virtual const ov::AnyMap& getModelConfig() const = 0;
diff --git a/model_api/cpp/adapters/include/adapters/openvino_adapter.h b/model_api/cpp/adapters/include/adapters/openvino_adapter.h
index c4df9b99..332ef787 100644
--- a/model_api/cpp/adapters/include/adapters/openvino_adapter.h
+++ b/model_api/cpp/adapters/include/adapters/openvino_adapter.h
@@ -32,6 +32,7 @@ class OpenVINOInferenceAdapter :public InferenceAdapter
     OpenVINOInferenceAdapter() = default;
 
     virtual InferenceOutput infer(const InferenceInput& input) override;
+    virtual void infer(const InferenceInput& input, InferenceOutput& output) override;
     virtual void inferAsync(const InferenceInput& input, const CallbackData callback_args) override;
     virtual void setCallback(std::function<void(ov::InferRequest, CallbackData)> callback);
     virtual bool isReady();
@@ -42,6 +43,9 @@ class OpenVINOInferenceAdapter :public InferenceAdapter
                            size_t max_num_requests = 1) override;
     virtual size_t getNumAsyncExecutors() const;
     virtual ov::PartialShape getInputShape(const std::string& inputName) const override;
+    virtual ov::PartialShape getOutputShape(const std::string& outputName) const override;
+    virtual ov::element::Type_t getInputDatatype(const std::string& inputName) const override;
+    virtual ov::element::Type_t getOutputDatatype(const std::string& outputName) const override;
     virtual std::vector<std::string> getInputNames() const override;
     virtual std::vector<std::string> getOutputNames() const override;
     virtual const ov::AnyMap& getModelConfig() const override;
diff --git a/model_api/cpp/adapters/src/openvino_adapter.cpp b/model_api/cpp/adapters/src/openvino_adapter.cpp
index 39632e3d..34f095bc 100644
--- a/model_api/cpp/adapters/src/openvino_adapter.cpp
+++ b/model_api/cpp/adapters/src/openvino_adapter.cpp
@@ -49,6 +49,20 @@ void OpenVINOInferenceAdapter::loadModel(const std::shared_ptr<const ov::Model>&
     }
 }
 
+void OpenVINOInferenceAdapter::infer(const InferenceInput& input, InferenceOutput& output) {
+    auto request = asyncQueue->operator[](asyncQueue->get_idle_request_id());
+    for (const auto& [name, tensor] : input) {
+        request.set_tensor(name, tensor);
+    }
+    for (const auto& [name, tensor] : output) {
+        request.set_tensor(name, tensor);
+    }
+    request.infer();
+    for (const auto& name : outputNames) {
+        output[name] = request.get_tensor(name);
+    }
+}
+
 InferenceOutput OpenVINOInferenceAdapter::infer(const InferenceInput& input) {
     auto request = asyncQueue->operator[](asyncQueue->get_idle_request_id());
     // Fill input blobs
@@ -95,6 +109,9 @@ size_t OpenVINOInferenceAdapter::getNumAsyncExecutors() const {
 ov::PartialShape OpenVINOInferenceAdapter::getInputShape(const std::string& inputName) const {
     return compiledModel.input(inputName).get_partial_shape();
 }
+ov::PartialShape OpenVINOInferenceAdapter::getOutputShape(const std::string& outputName) const {
+    return compiledModel.output(outputName).get_partial_shape();
+}
 
 void OpenVINOInferenceAdapter::initInputsOutputs() {
     for (const auto& input : compiledModel.inputs()) {
@@ -105,6 +122,12 @@ void OpenVINOInferenceAdapter::initInputsOutputs() {
         outputNames.push_back(output.get_any_name());
     }
 }
+ov::element::Type_t OpenVINOInferenceAdapter::getInputDatatype(const std::string&) const {
+    throw std::runtime_error("Not implemented");
+}
+ov::element::Type_t OpenVINOInferenceAdapter::getOutputDatatype(const std::string&) const {
+    throw std::runtime_error("Not implemented");
+}
 
 std::vector<std::string> OpenVINOInferenceAdapter::getInputNames() const {
     return inputNames;
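
Caller-side usage sketch for the new synchronous overload (illustrative, not part of the diff). It assumes `InferenceInput`/`InferenceOutput` map tensor names to `ov::Tensor` (as the range-for loops in the implementation suggest), that the adapter has already been loaded via `loadModel`, and that the model has a single output with a static shape and f32 element type; the function name and buffer handling below are hypothetical.

```cpp
#include <adapters/inference_adapter.h>

#include <openvino/openvino.hpp>

#include <memory>
#include <string>

// Hypothetical helper: run one inference while writing results into a
// caller-owned buffer instead of letting the adapter allocate the output.
void inferIntoBuffer(const std::shared_ptr<InferenceAdapter>& adapter,
                     const ov::Tensor& image,
                     float* result_buffer) {
    InferenceInput input;
    input[adapter->getInputNames().front()] = image;

    // Bind caller-owned memory as the output tensor; getOutputShape() comes
    // from this PR. get_shape() assumes the output shape is static, and f32
    // is assumed because getOutputDatatype() is not implemented yet.
    const std::string output_name = adapter->getOutputNames().front();
    ov::Shape out_shape = adapter->getOutputShape(output_name).get_shape();
    InferenceOutput output;
    output[output_name] = ov::Tensor(ov::element::f32, out_shape, result_buffer);

    // New overload: tensors passed in `output` are set on the request before
    // request.infer(), so results land directly in result_buffer.
    adapter->infer(input, output);
}
```

If the output name or shape is not known up front, the existing `InferenceOutput infer(const InferenceInput&)` overload still covers that case.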