Exposing trace context to python backend #346

Merged: 9 commits, Mar 15, 2024
2 changes: 2 additions & 0 deletions CMakeLists.txt
@@ -155,6 +155,8 @@ set(
src/infer_response.h
src/infer_request.cc
src/infer_request.h
+ src/infer_trace.cc
+ src/infer_trace.h
src/message_queue.h
src/ipc_message.cc
src/ipc_message.h
19 changes: 13 additions & 6 deletions src/infer_request.cc
@@ -170,7 +170,7 @@ InferRequest::GetPreferredMemory()
}

InferenceTrace&
- InferRequest::Trace()
+ InferRequest::GetTrace()
{
return trace_;
}
@@ -210,7 +210,6 @@ InferRequest::SaveToSharedMemory(std::unique_ptr<SharedMemoryManager>& shm_pool)
infer_request_shm_ptr_->is_decoupled = is_decoupled_;
infer_request_shm_ptr_->timeout = timeout_;
infer_request_shm_ptr_->preferred_memory = preferred_memory_;
- infer_request_shm_ptr_->trace = trace_;
infer_request_shm_ptr_->request_release_flags = request_release_flags_;

output_names_handle_shm_ptr_ =
@@ -258,6 +257,9 @@ InferRequest::SaveToSharedMemory(std::unique_ptr<SharedMemoryManager>& shm_pool)
PbString::Create(shm_pool, Parameters());
infer_request_shm_ptr_->parameters_shm_handle = parameters_shm->ShmHandle();

+ trace_.SaveToSharedMemory(shm_pool);
+ infer_request_shm_ptr_->trace_shm_handle = trace_.ShmHandle();

// Save the references to shared memory.
infer_request_shm_ = std::move(infer_request_shm);
request_id_shm_ = std::move(request_id_shm);
@@ -312,6 +314,10 @@ InferRequest::LoadFromSharedMemory(
CorrelationId::LoadFromSharedMemory(
shm_pool, infer_request_shm_ptr->correlation_id_shm_handle);

+ std::unique_ptr<InferenceTrace> infer_trace_shm =
+     InferenceTrace::LoadFromSharedMemory(
+         shm_pool, infer_request_shm_ptr->trace_shm_handle);

std::unique_ptr<PbString> model_name_shm = PbString::LoadFromSharedMemory(
shm_pool, infer_request_shm_ptr->model_name_shm_handle);
std::unique_ptr<PbString> request_id_shm = PbString::LoadFromSharedMemory(
@@ -321,8 +327,8 @@

return std::unique_ptr<InferRequest>(new InferRequest(
infer_request_shm, request_id_shm, correlation_id_shm,
- requested_output_names_shm, model_name_shm, input_tensors,
- parameters_shm));
+ requested_output_names_shm, model_name_shm, input_tensors, parameters_shm,
+ infer_trace_shm));
}

InferRequest::InferRequest(
@@ -332,7 +338,8 @@ InferRequest::InferRequest(
std::vector<std::unique_ptr<PbString>>& requested_output_names_shm,
std::unique_ptr<PbString>& model_name_shm,
std::vector<std::shared_ptr<PbTensor>>& input_tensors,
- std::unique_ptr<PbString>& parameters_shm)
+ std::unique_ptr<PbString>& parameters_shm,
+ std::unique_ptr<InferenceTrace>& infer_trace_shm)
: infer_request_shm_(std::move(infer_request_shm)),
request_id_shm_(std::move(request_id_shm)),
requested_output_names_shm_(std::move(requested_output_names_shm)),
@@ -373,7 +380,7 @@ InferRequest::InferRequest(
is_decoupled_ = infer_request_shm_ptr_->is_decoupled;
timeout_ = infer_request_shm_ptr_->timeout;
preferred_memory_ = infer_request_shm_ptr_->preferred_memory;
- trace_ = infer_request_shm_ptr_->trace;
+ trace_ = InferenceTrace(infer_trace_shm);
request_release_flags_ = infer_request_shm_ptr_->request_release_flags;

#ifdef TRITON_PB_STUB
24 changes: 5 additions & 19 deletions src/infer_request.h
@@ -31,6 +31,7 @@

#include "correlation_id.h"
#include "infer_response.h"
#include "infer_trace.h"
#include "pb_preferred_memory.h"
#include "pb_tensor.h"

@@ -43,22 +44,6 @@ namespace triton { namespace backend { namespace python {

class Stub;

- //
- // Inference Trace
- //
- struct InferenceTrace {
- #ifndef TRITON_PB_STUB
-   TRITONSERVER_InferenceTrace* triton_trace_;
-   InferenceTrace(TRITONSERVER_InferenceTrace* triton_trace)
-       : triton_trace_(triton_trace)
-   {
-   }
- #else
-   void* triton_trace_;
- #endif
-   InferenceTrace() : triton_trace_(nullptr) {}
- };

//
// Inference Request
//
@@ -72,7 +57,7 @@ struct InferRequestShm {
bool is_decoupled;
uint64_t timeout;
PreferredMemory preferred_memory;
- InferenceTrace trace;
+ bi::managed_external_buffer::handle_t trace_shm_handle;
uint32_t request_release_flags;
bi::managed_external_buffer::handle_t correlation_id_shm_handle;
bi::managed_external_buffer::handle_t model_name_shm_handle;
@@ -108,7 +93,7 @@ class InferRequest {
bool IsDecoupled();
void SetIsDecoupled(const bool is_decoupled);
PreferredMemory& GetPreferredMemory();
- InferenceTrace& Trace();
+ InferenceTrace& GetTrace();
uint32_t ReleaseFlags();
void SetReleaseFlags(const uint32_t& flags);

@@ -149,7 +134,8 @@ class InferRequest {
std::vector<std::unique_ptr<PbString>>& requested_output_names_shm,
std::unique_ptr<PbString>& model_name_shm,
std::vector<std::shared_ptr<PbTensor>>& input_tensors,
- std::unique_ptr<PbString>& parameters_shm);
+ std::unique_ptr<PbString>& parameters_shm,
+ std::unique_ptr<InferenceTrace>& infer_trace_shm);

std::string request_id_;
CorrelationId correlation_id_;
101 changes: 101 additions & 0 deletions src/infer_trace.cc
@@ -0,0 +1,101 @@
// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "infer_trace.h"

namespace triton { namespace backend { namespace python {

InferenceTrace::InferenceTrace(const InferenceTrace& rhs)
{
triton_trace_ = rhs.triton_trace_;
trace_context_ = rhs.trace_context_;
}

InferenceTrace&
InferenceTrace::operator=(const InferenceTrace& rhs)
{
triton_trace_ = rhs.triton_trace_;
trace_context_ = rhs.trace_context_;
return *this;
}

InferenceTrace::InferenceTrace(std::unique_ptr<InferenceTrace>& trace_shm)
{
triton_trace_ = trace_shm->triton_trace_;
trace_context_ = trace_shm->trace_context_;
}

void
InferenceTrace::SaveToSharedMemory(
std::unique_ptr<SharedMemoryManager>& shm_pool)
{
AllocatedSharedMemory<InferenceTraceShm> infer_trace_shm =
shm_pool->Construct<InferenceTraceShm>();
infer_trace_shm_ptr_ = infer_trace_shm.data_.get();

infer_trace_shm_ptr_->triton_trace = triton_trace_;

std::unique_ptr<PbString> trace_context_shm =
PbString::Create(shm_pool, trace_context_);

infer_trace_shm_ptr_->trace_context_shm_handle =
trace_context_shm->ShmHandle();

// Save the references to shared memory.
trace_context_shm_ = std::move(trace_context_shm);
infer_trace_shm_ = std::move(infer_trace_shm);
shm_handle_ = infer_trace_shm_.handle_;
}

std::unique_ptr<InferenceTrace>
InferenceTrace::LoadFromSharedMemory(
std::unique_ptr<SharedMemoryManager>& shm_pool,
bi::managed_external_buffer::handle_t handle)
{
AllocatedSharedMemory<InferenceTraceShm> infer_trace_shm =
shm_pool->Load<InferenceTraceShm>(handle);
InferenceTraceShm* infer_trace_shm_ptr = infer_trace_shm.data_.get();

std::unique_ptr<PbString> trace_context_shm = PbString::LoadFromSharedMemory(
shm_pool, infer_trace_shm_ptr->trace_context_shm_handle);

return std::unique_ptr<InferenceTrace>(
new InferenceTrace(infer_trace_shm, trace_context_shm));
}

InferenceTrace::InferenceTrace(
AllocatedSharedMemory<InferenceTraceShm>& infer_trace_shm,
std::unique_ptr<PbString>& trace_context_shm)
: infer_trace_shm_(std::move(infer_trace_shm)),
trace_context_shm_(std::move(trace_context_shm))
{
infer_trace_shm_ptr_ = infer_trace_shm_.data_.get();
shm_handle_ = infer_trace_shm_.handle_;
triton_trace_ = infer_trace_shm_ptr_->triton_trace;
trace_context_ = trace_context_shm_->String();
}

}}}; // namespace triton::backend::python
90 changes: 90 additions & 0 deletions src/infer_trace.h
@@ -0,0 +1,90 @@
// Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#pragma once

#include <string>

#include "pb_string.h"
#include "pb_utils.h"

namespace triton { namespace backend { namespace python {

struct InferenceTraceShm {
bi::managed_external_buffer::handle_t trace_context_shm_handle;
// The address of the 'TRITONSERVER_InferenceTrace' object.
void* triton_trace;
};

Review thread on `void* triton_trace`:

Contributor: Can you remind me why this is void*? To avoid C API references out of process in the stub?

Contributor (Author): Yes, the stub does not have access to TRITONSERVER_InferenceTrace.

//
// Inference Trace
//
class InferenceTrace {
public:
InferenceTrace(void* triton_trace, const std::string& ctxt)
: triton_trace_(triton_trace), trace_context_(ctxt)
{
}
InferenceTrace() : triton_trace_(nullptr), trace_context_("") {}
InferenceTrace(const InferenceTrace& rhs);
InferenceTrace(std::unique_ptr<InferenceTrace>& trace_shm);
InferenceTrace& operator=(const InferenceTrace& rhs);
/// Save InferenceTrace object to shared memory.
/// \param shm_pool Shared memory pool to save the InferenceTrace object.
void SaveToSharedMemory(std::unique_ptr<SharedMemoryManager>& shm_pool);

/// Create an InferenceTrace object from shared memory.
/// \param shm_pool Shared memory pool
/// \param handle Shared memory handle of the InferenceTrace.
/// \return Returns the InferenceTrace in the specified handle
/// location.
static std::unique_ptr<InferenceTrace> LoadFromSharedMemory(
std::unique_ptr<SharedMemoryManager>& shm_pool,
bi::managed_external_buffer::handle_t handle);

void* TritonTrace() { return triton_trace_; }
const std::string& Context() const { return trace_context_; }

bi::managed_external_buffer::handle_t ShmHandle() { return shm_handle_; }

private:
// The private constructor for creating an InferenceTrace object from shared
// memory.
InferenceTrace(
AllocatedSharedMemory<InferenceTraceShm>& infer_trace_shm,
std::unique_ptr<PbString>& trace_context_shm);

void* triton_trace_;
std::string trace_context_;

// Shared Memory Data Structures
AllocatedSharedMemory<InferenceTraceShm> infer_trace_shm_;
InferenceTraceShm* infer_trace_shm_ptr_;
bi::managed_external_buffer::handle_t shm_handle_;
std::unique_ptr<PbString> trace_context_shm_;
};

}}}; // namespace triton::backend::python
14 changes: 12 additions & 2 deletions src/pb_stub.cc
@@ -1611,7 +1611,17 @@ PYBIND11_EMBEDDED_MODULE(c_python_backend_utils, module)
.export_values();

py::class_<InferenceTrace, std::shared_ptr<InferenceTrace>>(
module, "InferenceTrace");
module, "InferenceTrace")
.def(
"get_context",
[](InferenceTrace& self, const std::string mode) -> py::object {
auto context = (mode == "opentelemetry") ? self.Context() : "";
if (context != "") {
return py::str(context);
}
return py::none();
},
py::arg("mode").none(false) = "triton");

py::class_<InferRequest, std::shared_ptr<InferRequest>>(
module, "InferenceRequest")
@@ -1698,7 +1708,7 @@ PYBIND11_EMBEDDED_MODULE(c_python_backend_utils, module)
.def("set_flags", &InferRequest::SetFlags)
.def("timeout", &InferRequest::Timeout)
.def("parameters", &InferRequest::Parameters)
.def("trace", &InferRequest::Trace)
.def("trace", &InferRequest::GetTrace)
.def(
"exec",
[](std::shared_ptr<InferRequest>& infer_request,
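Taken together, the new binding means a Python model can pull the trace context off an incoming request via request.trace().get_context(). Below is a minimal sketch of a hypothetical model.py; get_context() returns a string only when mode="opentelemetry" and the server is running with OpenTelemetry tracing enabled, and None otherwise (including under the default mode="triton"). Treating the returned string as a JSON carrier with a "traceparent" key is an assumption about the tracer's output, not something this diff guarantees.

```python
# model.py -- hypothetical sketch of reading the trace context exposed by
# this PR from inside a Python backend model.
import json

import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    def execute(self, requests):
        responses = []
        for request in requests:
            # trace() returns the InferenceTrace object bound above;
            # get_context() yields None unless OpenTelemetry tracing is on.
            context = request.trace().get_context(mode="opentelemetry")
            if context is not None:
                # Assumption: the context string is a JSON propagation
                # carrier such as {"traceparent": "..."}.
                carrier = json.loads(context)
                pb_utils.Logger.log_info(
                    f"traceparent: {carrier.get('traceparent')}")
            responses.append(pb_utils.InferenceResponse(output_tensors=[]))
        return responses
```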
14 changes: 12 additions & 2 deletions src/python_be.cc
@@ -387,10 +387,21 @@ ModelInstanceState::SaveRequestsToSharedMemory(
auto err = TRITONBACKEND_RequestTrace(request, &triton_trace);
if (err != nullptr) {
triton_trace = nullptr;
+ LOG_MESSAGE(TRITONSERVER_LOG_ERROR, TRITONSERVER_ErrorMessage(err));
TRITONSERVER_ErrorDelete(err);
}
+ const char* val = nullptr;
+ if (triton_trace != nullptr) {
+   err = TRITONSERVER_InferenceTraceContext(triton_trace, &val);
+   if (err != nullptr) {
+     LOG_MESSAGE(TRITONSERVER_LOG_ERROR, TRITONSERVER_ErrorMessage(err));
+     TRITONSERVER_ErrorDelete(err);
+   }
+ }
+ std::string context = (val != nullptr) ? std::string(val) : "";

- InferenceTrace trace = InferenceTrace(triton_trace);
+ InferenceTrace trace =
+     InferenceTrace(reinterpret_cast<void*>(triton_trace), context);

uint64_t request_timeout;
RETURN_IF_ERROR(TRITONBACKEND_InferenceRequestTimeoutMicroseconds(
@@ -415,7 +426,6 @@
reinterpret_cast<intptr_t>(request),
PreferredMemory(PreferredMemory::kDefault, 0), trace);
}

RETURN_IF_EXCEPTION(infer_request->SaveToSharedMemory(Stub()->ShmPool()));
requests_shm[r] = infer_request->ShmHandle();
pb_infer_requests.emplace_back(std::move(infer_request));
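On the server side, the hunk above now fetches the serialized trace context with TRITONSERVER_InferenceTraceContext and ships it to the stub alongside the opaque trace pointer; both lookups log and swallow errors, so a tracing failure never fails the request. Once the context reaches a model, one way it could be used is to parent new OpenTelemetry spans onto the server's trace. The sketch below is an illustration under stated assumptions: it presumes the opentelemetry-api package is installed in the stub's Python environment and that the context string is a JSON propagation carrier, neither of which this PR itself provides.

```python
# Hypothetical helper for joining Triton's OpenTelemetry trace from a model.
import json

from opentelemetry import trace
from opentelemetry.propagate import extract, inject


def span_for_request(context_json: str):
    # Rebuild the parent context the Triton server serialized for us.
    carrier = json.loads(context_json)
    parent_ctx = extract(carrier)
    tracer = trace.get_tracer("python_backend_model")
    # Spans opened under this context appear as children of the request trace.
    return tracer.start_as_current_span("model_work", context=parent_ctx)


# Usage inside execute(), once get_context() returned a non-None string:
#   with span_for_request(context):
#       headers = {}
#       inject(headers)  # propagate onward, e.g. into an outbound HTTP call
```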