diff --git a/src/bindings/js/node/include/infer_request.hpp b/src/bindings/js/node/include/infer_request.hpp
index 3162e5481ab34a..944064f11d82d1 100644
--- a/src/bindings/js/node/include/infer_request.hpp
+++ b/src/bindings/js/node/include/infer_request.hpp
@@ -124,4 +124,6 @@ class InferRequestWrap : public Napi::ObjectWrap<InferRequestWrap> {
 
 void FinalizerCallback(Napi::Env env, void* finalizeData, TsfnContext* context);
 
+std::map<std::string, ov::Tensor> get_js_infer_result(ov::InferRequest* infer_request);
+
 void performInferenceThread(TsfnContext* context);
diff --git a/src/bindings/js/node/src/infer_request.cpp b/src/bindings/js/node/src/infer_request.cpp
index 0b5629adac6fe9..5acbb10a15700f 100644
--- a/src/bindings/js/node/src/infer_request.cpp
+++ b/src/bindings/js/node/src/infer_request.cpp
@@ -138,15 +138,34 @@ Napi::Value InferRequestWrap::get_output_tensor(const Napi::CallbackInfo& info)
     return TensorWrap::wrap(info.Env(), tensor);
 }
 
+std::map<std::string, ov::Tensor> get_js_infer_result(ov::InferRequest* infer_request) {
+    auto model_outputs = infer_request->get_compiled_model().outputs();
+    std::map<std::string, ov::Tensor> outputs;
+    for (auto& output : model_outputs) {
+        const auto& tensor = infer_request->get_tensor(output);
+        auto new_tensor = ov::Tensor(tensor.get_element_type(), tensor.get_shape());
+        tensor.copy_to(new_tensor);
+        const auto name = output.get_names().empty() ? output.get_node()->get_name() : output.get_any_name();
+
+        // Unnamed outputs may fall back to the same node name; suffix a counter to keep keys unique
+        auto key = name;
+        int counter = 1;
+        while (outputs.find(key) != outputs.end()) {
+            key = name + std::to_string(counter);
+            ++counter;
+        }
+
+        outputs.insert({key, new_tensor});
+    }
+    return outputs;
+}
+
 Napi::Value InferRequestWrap::get_output_tensors(const Napi::CallbackInfo& info) {
-    auto compiled_model = _infer_request.get_compiled_model().outputs();
+    auto output_map = get_js_infer_result(&_infer_request);
     auto outputs_obj = Napi::Object::New(info.Env());
 
-    for (auto& node : compiled_model) {
-        auto tensor = _infer_request.get_tensor(node);
-        auto new_tensor = ov::Tensor(tensor.get_element_type(), tensor.get_shape());
-        tensor.copy_to(new_tensor);
-        outputs_obj.Set(node.get_any_name(), TensorWrap::wrap(info.Env(), new_tensor));
+    for (const auto& [key, tensor] : output_map) {
+        outputs_obj.Set(key, TensorWrap::wrap(info.Env(), tensor));
     }
     return outputs_obj;
 }
@@ -208,17 +227,7 @@ void performInferenceThread(TsfnContext* context) {
         }
 
         context->_ir->infer();
-        auto compiled_model = context->_ir->get_compiled_model().outputs();
-        std::map<std::string, ov::Tensor> outputs;
-
-        for (auto& node : compiled_model) {
-            const auto& tensor = context->_ir->get_tensor(node);
-            auto new_tensor = ov::Tensor(tensor.get_element_type(), tensor.get_shape());
-            tensor.copy_to(new_tensor);
-            outputs.insert({node.get_any_name(), new_tensor});
-        }
-
-        context->result = outputs;
+        context->result = get_js_infer_result(context->_ir);
     }
 
     auto callback = [](Napi::Env env, Napi::Function, TsfnContext* context) {
diff --git a/src/bindings/js/node/tests/setup.js b/src/bindings/js/node/tests/setup.js
index b4885d24157abe..34c110c04e9833 100644
--- a/src/bindings/js/node/tests/setup.js
+++ b/src/bindings/js/node/tests/setup.js
@@ -6,4 +6,5 @@ if (require.main === module) {
 
 async function main() {
   await downloadTestModel(testModels.testModelFP32);
+  await downloadTestModel(testModels.modelV3Small);
 }
diff --git a/src/bindings/js/node/tests/unit/infer_request.test.js b/src/bindings/js/node/tests/unit/infer_request.test.js
index b324630d08d6d2..b0593dba81e31d 100644
--- a/src/bindings/js/node/tests/unit/infer_request.test.js
+++ b/src/bindings/js/node/tests/unit/infer_request.test.js
@@ -2,6 +2,7 @@
 // Copyright (C) 2018-2025 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 
+const fs = require('node:fs/promises');
 const { addon: ov } = require('../..');
 const assert = require('assert');
 const { describe, it, before, beforeEach } = require('node:test');
@@ -324,3 +325,45 @@ describe('ov.InferRequest tests', () => {
     });
   });
 });
+
+describe('ov.InferRequest tests with missing output names', () => {
+  const { modelV3Small } = testModels;
+  let compiledModel = null;
+  let tensorData = null;
+  let tensor = null;
+  let inferRequest = null;
+
+  before(async () => {
+    await isModelAvailable(modelV3Small);
+
+    const core = new ov.Core();
+
+    let modelData = await fs.readFile(modelV3Small.xml, 'utf8');
+    const weights = await fs.readFile(modelV3Small.bin);
+    // Strip the output tensor names from the IR so result keys must fall back to node names
+    modelData = modelData.replace(
+      'names="MobilenetV3/Predictions/Softmax:0"',
+      ''
+    );
+    const model = await core.readModel(Buffer.from(modelData, 'utf8'), weights);
+
+    compiledModel = await core.compileModel(model, 'CPU');
+    inferRequest = compiledModel.createInferRequest();
+
+    tensorData = Float32Array.from(
+      { length: lengthFromShape(modelV3Small.inputShape) },
+      () => Math.random() + epsilon,
+    );
+    tensor = new ov.Tensor(ov.element.f32, modelV3Small.inputShape, tensorData);
+  });
+
+  it('Test infer(inputData: Tensor[])', () => {
+    const result = inferRequest.infer([tensor]);
+    assert.deepStrictEqual(Object.keys(result).length, 1);
+  });
+
+  it('Test inferAsync(inputData: Tensor[])', async () => {
+    const result = await inferRequest.inferAsync([tensor]);
+    assert.deepStrictEqual(Object.keys(result).length, 1);
+  });
+});
diff --git a/src/bindings/js/node/tests/utils.js b/src/bindings/js/node/tests/utils.js
index 88f48066d1abf2..243a39cef74264 100644
--- a/src/bindings/js/node/tests/utils.js
+++ b/src/bindings/js/node/tests/utils.js
@@ -26,6 +26,16 @@ const testModels = {
     binURL:
       'https://media.githubusercontent.com/media/openvinotoolkit/testdata/master/models/test_model/test_model_fp32.bin',
   },
+  modelV3Small: {
+    xml: getModelPath('v3-small_224_1.0_float.xml'),
+    bin: getModelPath('v3-small_224_1.0_float.bin'),
+    inputShape: [1, 224, 224, 3],
+    outputShape: [1, 1001],
+    xmlURL:
+      'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/mobelinet-v3-tf/FP32/v3-small_224_1.0_float.xml',
+    binURL:
+      'https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/mobelinet-v3-tf/FP32/v3-small_224_1.0_float.bin',
+  },
 };
 
 module.exports = {
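
For reviewers, a minimal consumer-side sketch of the behavior this patch changes, assuming the published openvino-node package; the model paths and input shape below are hypothetical stand-ins for any model whose outputs carry no tensor names:

// Sketch only (not part of the patch). Assumes 'model.xml'/'model.bin' exist
// and describe a model with an unnamed output and a [1, 224, 224, 3] f32 input.
const { addon: ov } = require('openvino-node');

async function main() {
  const core = new ov.Core();
  const model = await core.readModel('model.xml', 'model.bin');
  const compiledModel = await core.compileModel(model, 'CPU');
  const inferRequest = compiledModel.createInferRequest();

  const input = new ov.Tensor(
    ov.element.f32,
    [1, 224, 224, 3],
    new Float32Array(1 * 224 * 224 * 3),
  );
  const result = await inferRequest.inferAsync([input]);

  // With this patch, an unnamed output is keyed by its node's friendly name
  // (with "1", "2", ... appended on collisions) instead of get_any_name()
  // throwing on the C++ side.
  console.log(Object.keys(result));
}

main();

Copying each output into a fresh ov::Tensor (as get_js_infer_result does) keeps the returned JS objects valid after the infer request is reused; the counter loop exists only to disambiguate outputs that fall back to the same node name.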