
Commit

format code
fs-eire committed Apr 19, 2023
1 parent eaa818d commit ae5e444
Showing 20 changed files with 229 additions and 239 deletions.
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/js/allocator.cc
@@ -10,14 +10,14 @@ namespace onnxruntime {
 namespace js {
 
 void* JsCustomAllocator::Alloc(size_t size) {
-  void* p = EM_ASM_PTR({return Module.jsepAlloc($0);}, size);
+  void* p = EM_ASM_PTR({ return Module.jsepAlloc($0); }, size);
   stats_.num_allocs++;
   stats_.bytes_in_use += size;
   return p;
 }
 
 void JsCustomAllocator::Free(void* p) {
-  size_t size = (size_t)(void*)EM_ASM_PTR({return Module.jsepFree($0);}, p);
+  size_t size = (size_t)(void*)EM_ASM_PTR({ return Module.jsepFree($0); }, p);
   stats_.bytes_in_use -= size;
 }

2 changes: 1 addition & 1 deletion onnxruntime/core/providers/js/data_transfer.cc
@@ -5,7 +5,7 @@
 
 #include "core/providers/js/data_transfer.h"
 
-EM_ASYNC_JS(void, jsepDownload, (const void *src_data, void *dst_data, size_t bytes), {
+EM_ASYNC_JS(void, jsepDownload, (const void* src_data, void* dst_data, size_t bytes), {
   await Module.jsepCopyAsync(src_data, dst_data, bytes);
 });

4 changes: 2 additions & 2 deletions onnxruntime/core/providers/js/data_transfer.h
@@ -11,8 +11,8 @@ namespace js {
 
 class DataTransfer : public IDataTransfer {
  public:
-  DataTransfer() {};
-  ~DataTransfer() {};
+  DataTransfer(){};
+  ~DataTransfer(){};
 
   bool CanCopy(const OrtDevice& src_device, const OrtDevice& dst_device) const override;

4 changes: 2 additions & 2 deletions onnxruntime/core/providers/js/js_execution_provider.cc
@@ -164,7 +164,6 @@ class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnn
 class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 12, float, MaxPool);
 class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kJsExecutionProvider, kOnnxDomain, 1, float, GlobalMaxPool);
 
-
 std::unique_ptr<KernelRegistry> RegisterKernels() {
   auto kernel_registry = std::make_unique<onnxruntime::KernelRegistry>();
 
@@ -310,7 +309,8 @@ void JsExecutionProvider::RegisterAllocator(AllocatorManager& allocator_manager)
   if (!custom_alloc) {
     AllocatorCreationInfo customAllocatorCreationInfo([&](int) {
       return std::make_unique<js::JsCustomAllocator>();
-    }, 0, false);
+    },
+    0, false);
     custom_alloc = CreateAllocator(customAllocatorCreationInfo);
     allocator_manager.InsertAllocator(custom_alloc);
   }
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/js/js_execution_provider.h
@@ -18,7 +18,7 @@ namespace js {
 template <typename T>
 KernelCreateInfo BuildKernelCreateInfo();
 
-}
+}  // namespace js
 
 // placeholder for future use. no options currently
 struct JsExecutionProviderInfo {
30 changes: 15 additions & 15 deletions onnxruntime/core/providers/js/js_export.cc
@@ -5,22 +5,22 @@
 
 #include "core/framework/op_kernel.h"
 
-const void * JsepOutput(void * context, int index, void * data) {
-  uint32_t * data_offset = reinterpret_cast<uint32_t *>(data);
-  uint32_t dim = *data_offset++;
-  size_t dim_size = static_cast<size_t>(dim);
-  std::vector<int64_t> dims;
-  dims.reserve(dim_size);
-  dims.resize(dim_size);
-  for (size_t i = 0; i < dim_size; i++) {
-    dims[i] = static_cast<int64_t>(*data_offset++);
-  }
+const void* JsepOutput(void* context, int index, void* data) {
+  uint32_t* data_offset = reinterpret_cast<uint32_t*>(data);
+  uint32_t dim = *data_offset++;
+  size_t dim_size = static_cast<size_t>(dim);
+  std::vector<int64_t> dims;
+  dims.reserve(dim_size);
+  dims.resize(dim_size);
+  for (size_t i = 0; i < dim_size; i++) {
+    dims[i] = static_cast<int64_t>(*data_offset++);
+  }
 
-  LOGF_DEFAULT(VERBOSE, "JsepOutput(%d, %s)", index, onnxruntime::TensorShape(dims).ToString().c_str());
+  LOGF_DEFAULT(VERBOSE, "JsepOutput(%d, %s)", index, onnxruntime::TensorShape(dims).ToString().c_str());
 
-  auto output = reinterpret_cast<onnxruntime::OpKernelContext*>(context)->Output(index, onnxruntime::TensorShape(dims));
-  auto r = output->DataRaw();
+  auto output = reinterpret_cast<onnxruntime::OpKernelContext*>(context)->Output(index, onnxruntime::TensorShape(dims));
+  auto r = output->DataRaw();
 
-  LOGF_DEFAULT(VERBOSE, "JsepOutput -- data=%zu", (size_t)(r));
-  return r;
+  LOGF_DEFAULT(VERBOSE, "JsepOutput -- data=%zu", (size_t)(r));
+  return r;
 }
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/js/js_export.h
@@ -10,5 +10,5 @@
 // TODO: Move to api.h
 
 extern "C" {
-const void * EMSCRIPTEN_KEEPALIVE JsepOutput(void * context, int index, void * data);
+const void* EMSCRIPTEN_KEEPALIVE JsepOutput(void* context, int index, void* data);
 };
175 changes: 90 additions & 85 deletions onnxruntime/core/providers/js/js_kernel.h
@@ -17,50 +17,55 @@ struct pthreadpool;
 namespace onnxruntime {
 namespace js {
 
-#define JSEP_INIT_KERNEL(optype) EM_ASM({ Module.jsepCreateKernel(#optype, $0, undefined); }, this)
-#define JSEP_INIT_KERNEL_ATTRIBUTE(optype, attr, ...) EM_ASM({ Module.jsepCreateKernel(#optype, $0, attr); }, this, __VA_ARGS__)
+// This macro is defined to bypass the code format from clang-format, which will overwrite "=>" into "= >"
+// We can use it to write JS inline code with arrow functions.
 
-#define JSEP_KERNEL_IMPL(classname, optype) \
-class classname : public JsKernel { \
-public: \
-classname(const OpKernelInfo& info) : JsKernel(info) { \
-JSEP_INIT_KERNEL(optype); \
-} \
-};
+// clang-format off
+#define JS_ARROW =>
+// clang-format on
 
-#define JSEP_KERNEL_TYPED_IMPL(classname, optype) \
-template<typename T> \
-class classname : public JsKernel { \
-public: \
-classname(const OpKernelInfo& info) : JsKernel(info) { \
-JSEP_INIT_KERNEL(optype); \
-} \
-};
+#define JSEP_INIT_KERNEL(optype) EM_ASM({ Module.jsepCreateKernel(#optype, $0, undefined); }, this)
+#define JSEP_INIT_KERNEL_ATTRIBUTE(optype, attr, ...) EM_ASM({ Module.jsepCreateKernel(#optype, $0, attr); }, this, __VA_ARGS__)
 
-#define JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, attr_pre, attr, ...) \
-class classname : public JsKernel { \
-public: \
-classname(const OpKernelInfo& info) : JsKernel(info) { \
-attr_pre \
-JSEP_INIT_KERNEL_ATTRIBUTE(optype, attr, __VA_ARGS__); \
-} \
-};
+#define JSEP_KERNEL_IMPL(classname, optype) \
+class classname : public JsKernel { \
+public: \
+classname(const OpKernelInfo& info) : JsKernel(info) { \
+JSEP_INIT_KERNEL(optype); \
+} \
+};
 
+#define JSEP_KERNEL_TYPED_IMPL(classname, optype) \
+template <typename T> \
+class classname : public JsKernel { \
+public: \
+classname(const OpKernelInfo& info) : JsKernel(info) { \
+JSEP_INIT_KERNEL(optype); \
+} \
+};
 
+#define JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, attr_pre, attr, ...) \
+class classname : public JsKernel { \
+public: \
+classname(const OpKernelInfo& info) : JsKernel(info) { \
+attr_pre \
+JSEP_INIT_KERNEL_ATTRIBUTE(optype, attr, __VA_ARGS__); \
+} \
+};
 
 #define JSEP_CLASS_IMPL_ATTRIBUTE_FLOAT_DEFAULT(classname, optype, attr_name, default_value, ...) \
-JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, , ({#attr_name:$1}), static_cast<double>(info.GetAttrOrDefault<float>(#attr_name, default_value)))
+JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, , ({#attr_name : $1}), static_cast<double>(info.GetAttrOrDefault<float>(#attr_name, default_value)))
 
 #define JSEP_CLASS_IMPL_ATTRIBUTE_FLOAT_2_DEFAULT(classname, optype, attr_name_1, default_value_1, attr_name_2, default_value_2, ...) \
-JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, , ({#attr_name_1:$1, #attr_name_2:$2}), \
-static_cast<double>(info.GetAttrOrDefault<float>(#attr_name_1, default_value_1)), \
-static_cast<double>(info.GetAttrOrDefault<float>(#attr_name_2, default_value_2)))
 
-
-#define JSEP_CLASS_IMPL_ATTRIBUTE_FLOAT(classname, optype, attr_name, ...) \
-JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, \
-float value; \
-ORT_ENFORCE(info.GetAttr<float>(#attr_name, &value)); , \
-, ({#attr_name:$1}), static_cast<double>(value))
+JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, , ({#attr_name_1 : $1, #attr_name_2 : $2}), \
+static_cast<double>(info.GetAttrOrDefault<float>(#attr_name_1, default_value_1)), \
+static_cast<double>(info.GetAttrOrDefault<float>(#attr_name_2, default_value_2)))
 
+#define JSEP_CLASS_IMPL_ATTRIBUTE_FLOAT(classname, optype, attr_name, ...) \
+JSEP_CLASS_IMPL_ATTRIBUTE(classname, optype, \
+float value; \
+ORT_ENFORCE(info.GetAttr<float>(#attr_name, &value));, \
+, ({#attr_name : $1}), static_cast<double>(value))
 
 // TODO:
 // class JsMultiProgramKernel : public OpKernel { /* TBD */ };
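
The comment added above explains why JS_ARROW exists: clang-format would split a literal "=>" in C++ source into "= >". The sketch below is illustrative only and not part of this commit (RUN_JS and LogDoubled are hypothetical names); it shows the usage pattern the macro enables. Because the JS snippet is passed through an intermediate macro argument, the preprocessor expands JS_ARROW to "=>" before Emscripten stringifies the code, while clang-format only ever sees the identifier JS_ARROW and cannot mangle it.

// Illustrative sketch, not from this commit: RUN_JS is a hypothetical wrapper macro.
// The snippet is a macro argument, so JS_ARROW is expanded to "=>" before EM_ASM
// stringifies the JavaScript; clang-format never sees a bare "=>" token.
#include <emscripten.h>

// clang-format off
#define JS_ARROW =>
// clang-format on

#define RUN_JS(snippet) EM_ASM({ snippet; })

void LogDoubled() {
  // The extra parentheses keep the bracketed array (and its commas) inside one macro argument.
  RUN_JS(([10, 20, 30].forEach((v) JS_ARROW console.log(v * 2))));
}
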
@@ -70,73 +75,73 @@ class JsKernel : public OpKernel {
   explicit JsKernel(const OpKernelInfo& info)
       : OpKernel(info) {}
   ~JsKernel() override {
-    EM_ASM({ Module.jsepReleaseKernel($0); }, this);
+    EM_ASM({ Module.jsepReleaseKernel($0); }, this);
   }
 
-  void * SerializeKernelContext(OpKernelContext* context, AllocatorPtr alloc) const {
-    //
-    // temp_data_format (every item is (u)int32_t):
-    // context_prt | input_count | [input_data_0] ... [input_data_N-1]
-    //
-    // input_data_format:
-    // type | data_ptr | dim_size | dim[0] ... dim[N-1]
-    //
-    size_t temp_data_size = sizeof(size_t) * 2;
-    for (int i = 0; i < context->InputCount(); i++) {
-      temp_data_size += sizeof(size_t) * (3 + context->Input<Tensor>(i)->Shape().NumDimensions());
-    }
-    uint32_t *p_serialized_kernel_context = reinterpret_cast<uint32_t*>(alloc->Alloc(temp_data_size));
-    if (p_serialized_kernel_context == nullptr) {
-      return nullptr;
-    }
 
-    p_serialized_kernel_context[0] = reinterpret_cast<uint32_t>(context);
-    p_serialized_kernel_context[1] = static_cast<uint32_t>(context->InputCount());
-    size_t index = 2;
-    for (int i = 0; i < context->InputCount(); i++) {
-      p_serialized_kernel_context[index++] = static_cast<uint32_t>(context->Input<Tensor>(i)->GetElementType());
-      p_serialized_kernel_context[index++] = reinterpret_cast<uint32_t>(context->Input<Tensor>(i)->DataRaw());
-      p_serialized_kernel_context[index++] = static_cast<uint32_t>(context->Input<Tensor>(i)->Shape().NumDimensions());
-      for (size_t d = 0; d < context->Input<Tensor>(i)->Shape().NumDimensions(); d++) {
-        p_serialized_kernel_context[index++] = static_cast<uint32_t>(context->Input<Tensor>(i)->Shape()[d]);
-      }
+  void* SerializeKernelContext(OpKernelContext* context, AllocatorPtr alloc) const {
+    //
+    // temp_data_format (every item is (u)int32_t):
+    // context_prt | input_count | [input_data_0] ... [input_data_N-1]
+    //
+    // input_data_format:
+    // type | data_ptr | dim_size | dim[0] ... dim[N-1]
+    //
+    size_t temp_data_size = sizeof(size_t) * 2;
+    for (int i = 0; i < context->InputCount(); i++) {
+      temp_data_size += sizeof(size_t) * (3 + context->Input<Tensor>(i)->Shape().NumDimensions());
+    }
+    uint32_t* p_serialized_kernel_context = reinterpret_cast<uint32_t*>(alloc->Alloc(temp_data_size));
+    if (p_serialized_kernel_context == nullptr) {
+      return nullptr;
+    }
 
+    p_serialized_kernel_context[0] = reinterpret_cast<uint32_t>(context);
+    p_serialized_kernel_context[1] = static_cast<uint32_t>(context->InputCount());
+    size_t index = 2;
+    for (int i = 0; i < context->InputCount(); i++) {
+      p_serialized_kernel_context[index++] = static_cast<uint32_t>(context->Input<Tensor>(i)->GetElementType());
+      p_serialized_kernel_context[index++] = reinterpret_cast<uint32_t>(context->Input<Tensor>(i)->DataRaw());
+      p_serialized_kernel_context[index++] = static_cast<uint32_t>(context->Input<Tensor>(i)->Shape().NumDimensions());
+      for (size_t d = 0; d < context->Input<Tensor>(i)->Shape().NumDimensions(); d++) {
+        p_serialized_kernel_context[index++] = static_cast<uint32_t>(context->Input<Tensor>(i)->Shape()[d]);
+      }
     }
 
 #ifndef NDEBUG
-    std::ostringstream os;
-    os << "temp data size: " << temp_data_size << ". Data:";
-    size_t temp_data_count = temp_data_size >> 2;
-    for (size_t i = 0; i < temp_data_count; i++) {
-      os << " " << p_serialized_kernel_context[i];
-    }
-    LOGS_DEFAULT(VERBOSE) << os.str();
+    std::ostringstream os;
+    os << "temp data size: " << temp_data_size << ". Data:";
+    size_t temp_data_count = temp_data_size >> 2;
+    for (size_t i = 0; i < temp_data_count; i++) {
+      os << " " << p_serialized_kernel_context[i];
+    }
+    LOGS_DEFAULT(VERBOSE) << os.str();
 #endif
 
-    return p_serialized_kernel_context;
+    return p_serialized_kernel_context;
   }
 
   virtual Status ComputeInternal(OpKernelContext* context) const {
-    AllocatorPtr alloc;
-    ORT_RETURN_IF_ERROR(context->GetTempSpaceCPUAllocator(&alloc));
+    AllocatorPtr alloc;
+    ORT_RETURN_IF_ERROR(context->GetTempSpaceCPUAllocator(&alloc));
 
-    auto p_serialized_kernel_context = SerializeKernelContext(context, alloc);
+    auto p_serialized_kernel_context = SerializeKernelContext(context, alloc);
 
-    int status = EM_ASM_INT({ return Module.jsepRun($0, $1); }, this, p_serialized_kernel_context);
+    int status = EM_ASM_INT({ return Module.jsepRun($0, $1); }, this, p_serialized_kernel_context);
 
-    LOGS_DEFAULT(VERBOSE) << "outputs = " << context->OutputCount() << ". Y.data="
-        << (size_t)(context->Output<Tensor>(0)->DataRaw()) << ".";
+    LOGS_DEFAULT(VERBOSE) << "outputs = " << context->OutputCount() << ". Y.data="
+        << (size_t)(context->Output<Tensor>(0)->DataRaw()) << ".";
 
-    alloc->Free(p_serialized_kernel_context);
+    alloc->Free(p_serialized_kernel_context);
 
-    if (status == 0) {
-      return Status::OK();
-    } else {
-      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Failed to run JSEP kernel");
-    }
+    if (status == 0) {
+      return Status::OK();
+    } else {
+      return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Failed to run JSEP kernel");
+    }
   }
 
   Status Compute(OpKernelContext* context) const override {
-    return ComputeInternal(context);
+    return ComputeInternal(context);
   }
 };
 }  // namespace js
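
The temp_data_format comment in SerializeKernelContext above describes a flat uint32 buffer: a context pointer and an input count, followed by (type | data_ptr | dim_size | dims...) for each input, which is then handed to the JS side via Module.jsepRun. The sketch below is a minimal standalone illustration of walking such a buffer; it is not part of this commit, the function name and the pointer values in main() are made up, and the real consumer is presumably the JS implementation behind Module.jsepRun.

// Illustrative sketch, not from this commit: decodes a buffer with the layout
// described by the temp_data_format comment in SerializeKernelContext.
#include <cstdint>
#include <cstdio>
#include <vector>

void DumpSerializedKernelContext(const uint32_t* p) {
  const unsigned context_ptr = p[0];  // address of the OpKernelContext
  const unsigned input_count = p[1];  // number of input tensors
  std::printf("context=0x%x inputs=%u\n", context_ptr, input_count);
  size_t index = 2;
  for (unsigned i = 0; i < input_count; i++) {
    const unsigned type = p[index++];      // ONNX tensor element type
    const unsigned data_ptr = p[index++];  // address of the tensor data
    const unsigned dim_size = p[index++];  // number of dimensions
    std::printf("  input %u: type=%u data=0x%x dims=[", i, type, data_ptr);
    for (unsigned d = 0; d < dim_size; d++) {
      std::printf(d ? ", %u" : "%u", p[index++]);
    }
    std::printf("]\n");
  }
}

int main() {
  // One float input (ONNX element type 1) of shape [2, 3]; the addresses are made up.
  const std::vector<uint32_t> buffer = {0x4000, 1, 1, 0x5000, 2, 2, 3};
  DumpSerializedKernelContext(buffer.data());
  return 0;
}
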
1 change: 0 additions & 1 deletion onnxruntime/core/providers/js/operators/binary.cc
@@ -24,7 +24,6 @@ namespace js {
 KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<TYPE>()), \
 KERNEL_CLASS);
 
-
 JSEP_KERNEL_IMPL(Add, Add)
 REG_ELEMENTWISE_VERSIONED_KERNEL(Add, 7, 12, float, Add);
 REG_ELEMENTWISE_VERSIONED_KERNEL(Add, 13, 13, float, Add);
2 changes: 0 additions & 2 deletions onnxruntime/core/providers/js/operators/conv.cc
@@ -35,8 +35,6 @@ namespace js {
 (*KernelDefBuilder::Create()).TypeConstraint("T", DataTypeImpl::GetTensorType<T>()), \
 Conv<T, false>);
 
-
-
 REGISTER_KERNEL_TYPED(float)
 
 }  // namespace js