Multi output tensor #8

Merged · 4 commits · Apr 22, 2020
2 changes: 1 addition & 1 deletion .gitignore
@@ -1,6 +1,6 @@
 # Microsoft Visual Studio Code
 .project
 .vscode/
-build/*
+build*/*
 # Auto-generated
 shared/version.hpp
9 changes: 0 additions & 9 deletions host/core/nnet/nnet_packet.hpp
@@ -39,15 +39,6 @@ class NNetPacket
         }
     }
 
-    std::vector<std::shared_ptr<HostDataPacket>> getTensors()
-    {
-        return _tensors_raw_data;
-    }
-
-    unsigned getTensorsNumber() const
-    {
-        return _tensors_raw_data.size();
-    }
 
 #ifdef HOST_PYTHON_MODULE
     py::array* getTensor(unsigned index)
11 changes: 7 additions & 4 deletions host/core/nnet/tensor_entry_container.hpp
@@ -43,7 +43,7 @@ class TensorEntryContainer
    {
        assert(nullptr != _tensors_info);
        // assert(nullptr != _tensors_raw_data);
-       assert(_tensors_info->size() == _tensors_raw_data.size());
+       // assert(_tensors_info->size() == _tensors_raw_data.size());
 
        std::vector<TensorEntry> entry;
 
@@ -52,11 +52,14 @@ class TensorEntryContainer
            TensorEntry te;
 
            const TensorInfo& ti = (*_tensors_info)[tensor_index];
-           const auto& trd = _tensors_raw_data[tensor_index];
+           const auto& trd = _tensors_raw_data[ti.offset == 0 ? tensor_index : 0];
 
            auto entry_byte_size = ti.getEntryByteSize();
 
-           te.raw_data = trd->data.data() + entry_index * entry_byte_size; // TODO: check whether it works for all outputs
+           if(ti.offset == 0)
+               te.raw_data = trd->data.data() + entry_index * entry_byte_size; // TODO: check whether it works for all outputs
+           else
+               te.raw_data = trd->data.data() + ti.offset;
 
            te.output_properties_type = ti.output_properties_type;
            te.output_properties_type_size = size_of_type(te.output_properties_type);
            te.properties_number = entry_byte_size / te.output_properties_type_size;
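
Note on the change above: when the device packs every network output into a single raw buffer, each TensorInfo now carries a byte offset into that buffer, and a zero offset keeps the old one-buffer-per-output indexing. A minimal sketch of that addressing rule, assuming stand-in types (OutputInfo, resolve_entry) that are illustrative only and not the project's API:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for the per-output metadata: entry size plus the new byte offset.
struct OutputInfo {
    std::size_t entry_byte_size = 0;
    std::uint32_t offset = 0;          // 0 means "own buffer per output"
};

// Returns a pointer to one entry of one output, mirroring the branch above:
// offset == 0 -> index into that output's own buffer by entry index;
// offset != 0 -> all outputs share buffers[0], starting at the fixed offset.
inline const std::uint8_t* resolve_entry(
    const std::vector<std::vector<std::uint8_t>>& buffers,
    const OutputInfo& info,
    std::size_t output_index,
    std::size_t entry_index)
{
    if (info.offset == 0) {
        const auto& own = buffers[output_index];
        return own.data() + entry_index * info.entry_byte_size;
    }
    // Packed case: everything lives in the first (and only) buffer,
    // so the entry index is not used here, exactly as in the diff above.
    assert(!buffers.empty());
    return buffers[0].data() + info.offset;
}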
2 changes: 2 additions & 0 deletions host/core/nnet/tensor_info.hpp
@@ -20,6 +20,8 @@ struct TensorInfo
    int nnet_input_width = 0;
    int nnet_input_height = 0;
 
+   uint32_t offset = 0;
+
    std::vector<int> output_properties_dimensions;
 
    std::vector<std::vector<std::string>> output_property_key_index_to_string;
19 changes: 11 additions & 8 deletions host/py_module/py_bindings.cpp
@@ -96,9 +96,11 @@ int wdog_start(void)
 }
 int wdog_stop(void)
 {
-    wdog_thread_alive = 0;
-    wd_thread.join();
-
+    if(wdog_thread_alive)
+    {
+        wdog_thread_alive = 0;
+        wd_thread.join();
+    }
     return 0;
 }
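
Note on wdog_stop above: calling join() on a std::thread that was never started (or was already joined) throws std::system_error, so the stop path now only clears the flag and joins when the watchdog is actually running. A small self-contained sketch of the same pattern, using an atomic flag instead of the plain int; the names here are illustrative, not the project's:

#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<bool> wdog_alive{false};
static std::thread wdog_thread;

void wdog_start()
{
    wdog_alive = true;
    wdog_thread = std::thread([] {
        while (wdog_alive) {
            // ... pet the watchdog / check the device here ...
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    });
}

void wdog_stop()
{
    // Guard the join: only tear down a thread that was actually started.
    if (wdog_alive) {
        wdog_alive = false;   // ask the loop to exit
        wdog_thread.join();   // then wait for it
    }
}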

@@ -479,10 +481,13 @@ std::shared_ptr<CNNHostPipeline> create_pipeline(
     printf("CNN input num channels: %d\n", cnn_input_info.cnn_input_num_channels);
 
     // update tensor infos
-    for (auto &ti : tensors_info)
+    assert(!(tensors_info.size() > (sizeof(cnn_input_info.offsets)/sizeof(cnn_input_info.offsets[0]))));
+
+    for (int i = 0; i < tensors_info.size(); i++)
     {
-        ti.nnet_input_width = cnn_input_info.cnn_input_width;
-        ti.nnet_input_height = cnn_input_info.cnn_input_height;
+        tensors_info[i].nnet_input_width = cnn_input_info.cnn_input_width;
+        tensors_info[i].nnet_input_height = cnn_input_info.cnn_input_height;
+        tensors_info[i].offset = cnn_input_info.offsets[i];
     }
 
     c_streams_myriad_to_pc["previewout"].dimensions = {
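
Note on the create_pipeline change above: the per-tensor offsets come from the device in a fixed-size array (see shared/cnn_info.hpp below), so the assert guards against the network reporting more output tensors than that array can describe before copying. A short sketch of the same sizeof-based bound check, with illustrative stand-in names rather than the real structs:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-ins mirroring the shared device struct and the host-side tensor info.
struct DeviceCnnInfo {
    std::uint32_t offsets[7];          // fixed capacity agreed with the device
};

struct HostTensorInfo {
    std::uint32_t offset = 0;
};

void apply_offsets(std::vector<HostTensorInfo>& tensors, const DeviceCnnInfo& info)
{
    // Capacity of the fixed array, computed the same sizeof/sizeof way as above.
    const std::size_t capacity = sizeof(info.offsets) / sizeof(info.offsets[0]);
    assert(tensors.size() <= capacity && "more network outputs than offset slots");

    for (std::size_t i = 0; i < tensors.size(); ++i)
        tensors[i].offset = info.offsets[i];
}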
@@ -752,8 +757,6 @@ PYBIND11_MODULE(depthai, m)
     py::class_<NNetPacket, std::shared_ptr<NNetPacket>>(m, "NNetPacket")
        .def("get_tensor", &NNetPacket::getTensor, py::return_value_policy::copy)
        .def("get_tensor", &NNetPacket::getTensorByName, py::return_value_policy::copy)
-       .def("get_tensors_number", &NNetPacket::getTensorsNumber)
-       .def("tensors", &NNetPacket::getTensors, py::return_value_policy::copy)
        .def("entries", &NNetPacket::getTensorEntryContainer, py::return_value_policy::copy)
        ;
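
Note on the binding change above: the raw tensors()/get_tensors_number() accessors are removed, steering Python callers toward the structured per-entry view. A minimal pybind11 sketch of that binding shape, assuming a stand-in Packet type rather than the real NNetPacket:

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <memory>
#include <vector>

namespace py = pybind11;

// Stand-in packet: exposes parsed entries instead of raw tensor blobs.
struct Packet {
    std::vector<std::vector<float>> parsed_entries;
    std::vector<std::vector<float>> entries() const { return parsed_entries; }
};

PYBIND11_MODULE(example_module, m)
{
    py::class_<Packet, std::shared_ptr<Packet>>(m, "Packet")
        // Keep only the structured accessor, mirroring the trimmed NNetPacket API.
        .def("entries", &Packet::entries, py::return_value_policy::copy);
}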

1 change: 1 addition & 0 deletions shared/cnn_info.hpp
@@ -10,4 +10,5 @@ struct cnn_info
    uint16_t cnn_input_num_channels;
    uint16_t number_of_cmx_slices;
    uint16_t number_of_shaves;
+   uint32_t offsets[7];
 };
2 changes: 1 addition & 1 deletion shared/version.hpp
@@ -1,4 +1,4 @@
 #pragma once
 
-const char *c_depthai_dev_version = "unknown";
+const char *c_depthai_dev_version = "c722ebde932d6627463321816a5654b5be6069e1";
 const char *c_depthai_version = "0.0.10a";