Initial commit for (de)serialization
This commit introduces (de)serialization of primitive types.
It also introduces an additional debug header - magic_enum - to
be used in debugging return codes and pretty printing.
I think it should be deleted before this implementation goes to
the main branch.
Signed-off-by: delphi <[email protected]>
asmfreak committed Mar 1, 2022
1 parent 38d0cb8 commit 2e95ec3
Showing 11 changed files with 2,305 additions and 25 deletions.
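The commit message describes magic_enum only as a temporary debugging aid. Below is a hedged sketch of the kind of pretty printing it enables; the error enum is assumed to be nunavut::support::Error as used elsewhere in this diff (with its header already included), and the helper function itself is illustrative, not part of the commit.

#include <iostream>
#include "magic_enum.hpp"

// Illustrative only: print the textual name of a serialization error code.
// magic_enum::enum_name maps an enumerator value to its identifier as a string_view.
inline void print_error(const nunavut::support::Error e)
{
    std::cout << "serialization error: " << magic_enum::enum_name(e) << '\n';
}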
14 changes: 14 additions & 0 deletions src/nunavut/lang/cpp/__init__.py
@@ -9,6 +9,7 @@
"""

import functools
import fractions
import io
import re
import textwrap
@@ -163,6 +164,19 @@ def filter_constant_value(language: Language, constant: pydsdl.Constant) -> str:
return c_filter_literal(language, constant.value.native_value, constant.data_type, "static_cast<{type}>({value})")


@template_language_filter(__name__)
def filter_literal(
language: Language,
value: typing.Union[fractions.Fraction, bool, int],
ty: pydsdl.Any,
cast_format: str = "static_cast<{type}>({value})",
) -> str:
"""
Renders the specified value of the specified type as a literal.
"""
return c_filter_literal(language, value, ty, cast_format)


def filter_to_standard_bit_length(t: pydsdl.PrimitiveType) -> int:
"""
Returns the nearest standard bit length of a type as an int.
113 changes: 101 additions & 12 deletions src/nunavut/lang/cpp/support/serialization.j2
@@ -140,6 +140,16 @@ public:
return derived_bitspan(self.data_, self.offset_bits_ + bits);
}

/// Return a copy of this bitspan advanced by bits_at bits: the byte pointer moves
/// forward by the whole bytes consumed and the remaining bit offset is re-normalized
/// to be less than 8.
derived_bitspan subspan({{ typename_unsigned_bit_length }} bits_at=0) const noexcept {
auto& self = *static_cast<const derived_bitspan*>(this);
const {{ typename_unsigned_bit_length }} offset_bits = self.offset_bits_ + bits_at;
const {{ typename_unsigned_length }} offset_bytes = offset_bits / 8U;
{{ assert('offset_bytes * 8U <= offset_bits') }}
const {{ typename_unsigned_length }} new_offset_bits = offset_bits - offset_bytes * 8U;
{{ assert('offset_bytes <= self.data_.size()') }}
return derived_bitspan({ self.data_.data() + offset_bytes, self.data_.size() - offset_bytes}, new_offset_bits);
}

void add_offset({{ typename_unsigned_bit_length }} bits) noexcept{
auto& self = *static_cast<derived_bitspan*>(this);
self.offset_bits_ += bits;
@@ -150,6 +160,19 @@ public:
self.offset_bits_ = bits;
}

{{ typename_unsigned_bit_length }} offset_misalignment({{ typename_unsigned_bit_length }} alignment_bits) const noexcept{
auto& self = *static_cast<const derived_bitspan*>(this);
return self.offset_bits_ % alignment_bits;
}

bool offset_alings_to({{ typename_unsigned_bit_length }} alignment_bits) const noexcept{
return offset_misalignment(alignment_bits) == 0U;
}

bool offset_alings_to_byte() const noexcept{
return offset_alings_to(8U);
}

{{ typename_unsigned_bit_length }} size() const noexcept{
auto& self = *static_cast<const derived_bitspan*>(this);
{{ typename_unsigned_bit_length }} bit_size = {# -#}
@@ -160,27 +183,45 @@ public:
return bit_size - self.offset_bits_;
}

{{ typename_byte }}& aligned_ref({{ typename_unsigned_length }} plus_offset_bits=0U) noexcept {
{{ typename_unsigned_bit_length }} offset() const noexcept {
auto& self = *static_cast<const derived_bitspan*>(this);
return self.offset_bits_;
}

{{ typename_unsigned_bit_length }} offset_bytes() const noexcept {
auto& self = *static_cast<const derived_bitspan*>(this);
const {{ typename_unsigned_length }} offset_bytes = (self.offset_bits_) / 8U;
return offset_bytes;
}

{{ typename_unsigned_bit_length }} offset_bytes_ceil() const noexcept {
auto& self = *static_cast<const derived_bitspan*>(this);
const {{ typename_unsigned_length }} offset_bytes = ((self.offset_bits_ + 7U) / 8U);
return offset_bytes;
}

{{ typename_byte }}& aligned_ref({{ typename_unsigned_bit_length }} plus_offset_bits=0U) noexcept {
auto& self = *static_cast<derived_bitspan*>(this);
const {{ typename_unsigned_length }} offset_bytes = ((self.offset_bits_ + plus_offset_bits) / 8U);
{{ assert('offset_bytes <= self.data_.size()') }}
return self.data_[offset_bytes];
}

const {{ typename_byte }}& aligned_ref({{ typename_unsigned_length }} plus_offset_bits=0U) const noexcept {
const {{ typename_byte }}& aligned_ref({{ typename_unsigned_bit_length }} plus_offset_bits=0U) const noexcept {
auto& self = *static_cast<const derived_bitspan*>(this);
const {{ typename_unsigned_length }} offset_bytes = ((self.offset_bits_ + plus_offset_bits) / 8U);
{{ assert('offset_bytes <= self.data_.size()') }}
return self.data_[offset_bytes];
}

{{ typename_byte }}* aligned_ptr({{ typename_unsigned_length }} plus_offset_bits=0U) noexcept {
{{ typename_byte }}* aligned_ptr({{ typename_unsigned_bit_length }} plus_offset_bits=0U) noexcept {
return &aligned_ref(plus_offset_bits);
}

const {{ typename_byte }}* aligned_ptr({{ typename_unsigned_length }} plus_offset_bits=0U) const noexcept {
const {{ typename_byte }}* aligned_ptr({{ typename_unsigned_bit_length }} plus_offset_bits=0U) const noexcept {
return &aligned_ref(plus_offset_bits);
}

};

} // namespace detail
@@ -214,6 +255,11 @@ public:
VoidResult setF32(const {{ typename_float_32 }} value);

VoidResult setF64(const {{ typename_float_64 }} value);

VoidResult setZeros() { return setZeros(size()); }
VoidResult setZeros({{ typename_unsigned_bit_length }} length);

VoidResult padAndMoveToAlignment({{ typename_unsigned_bit_length }} length);
};

struct const_bitspan: public detail::any_bitspan<const_bitspan>{
@@ -257,7 +303,10 @@ public:
{
{{ assert('length_mod < 8U') }}
const uint8_t mask = static_cast<uint8_t>((1U << length_mod) - 1U);
dst.data_[length_bytes] = (dst.data_[length_bytes] & static_cast<{{ typename_byte }}>(~mask)) | (data_[length_bytes] & mask);
//dst.data_[length_bytes] = (dst.data_[length_bytes] & static_cast<{{ typename_byte }}>(~mask)) | (data_[length_bytes] & mask);
dst.aligned_ref(length_bits) = {# -#}
(dst.aligned_ref(length_bits) & static_cast<{{ typename_byte }}>(~mask)) {# -#}
| (aligned_ref(length_bits) & mask);
}
}
else
@@ -292,7 +341,7 @@ public:
const uint8_t in = static_cast<uint8_t>(static_cast<uint8_t>(data_[src_off / 8U] >> src_mod) << dst_mod) & 0xFFU; // NOSONAR
// Intentional violation of MISRA: indexing on a pointer.
// This simplifies the implementation greatly and avoids pointer arithmetics.
const uint8_t a = dst.data_[dst_off / 8U] & (static_cast<uint8_t>(~mask)); // NOSONAR
const uint8_t a = dst.data_[dst_off / 8U] & (static_cast<uint8_t>((~mask) & 0xFFU)); // NOSONAR
const uint8_t b = in & mask;
// Intentional violation of MISRA: indexing on a pointer.
// This simplifies the implementation greatly and avoids pointer arithmetics.
@@ -304,6 +353,12 @@ public:
}
}

template<{{ typename_unsigned_bit_length }} n_bits>
void align_offset_to(){
static_assert((n_bits == 8) or (n_bits == 16) or (n_bits == 32) or (n_bits == 64), "Non-standard alignment!");
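// Round offset_bits_ up to the next multiple of n_bits; the mask trick below is valid
// because n_bits is a power of two (enforced by the static_assert above).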
offset_bits_ = (offset_bits_ + (n_bits - 1)) & ~(static_cast<{{ typename_unsigned_bit_length }}>(n_bits - 1));
}

/// Calculate the number of bits to safely copy from/to a serialized buffer.
/// Mind the units! By convention, buffer size is specified in bytes, but fragment length and offset are in bits.
///
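The helper documented above is collapsed in this view. The stand-alone sketch below only illustrates the unit convention the comment warns about (buffer size in bytes, offset and fragment length in bits); its name and signature are assumptions, not the code from this commit.

#include <cstddef>

// Clamp a requested fragment length so a copy never runs past the end of the buffer.
inline std::size_t saturate_fragment_bits(const std::size_t buf_size_bytes,
                                          const std::size_t offset_bits,
                                          const std::size_t fragment_length_bits)
{
    const std::size_t buf_size_bits = buf_size_bytes * 8U;  // buffer size arrives in bytes
    const std::size_t remaining_bits =
        (offset_bits < buf_size_bits) ? (buf_size_bits - offset_bits) : 0U;
    return (fragment_length_bits < remaining_bits) ? fragment_length_bits : remaining_bits;
}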
@@ -387,6 +442,39 @@ public:
{{ typename_float_64 }} getF64();
};

VoidResult bitspan::setZeros({{ typename_unsigned_bit_length }} length){
if(length > size()){
return -Error::SERIALIZATION_BUFFER_TOO_SMALL;
}
if(length == 0){
return {};
}
const {{ typename_unsigned_length }} offset_bytes = offset_bits_ / 8U;
const {{ typename_unsigned_bit_length }} offset_bits_mod = offset_bits_ % 8U;
const {{ typename_unsigned_bit_length }} length_bytes_ceil = (length + 7U) / 8U;
{{ assert('offset_bits_mod < 8U') }}
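// Preserve the low offset_bits_mod bits of the first affected byte so they survive the memset below.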
const auto first_byte_temp = data_[offset_bytes] & static_cast<{{ typename_byte }}>(0xFF >> (8U - offset_bits_mod));
memset(&data_[offset_bytes], 0, length_bytes_ceil);
data_[offset_bytes] = static_cast<{{ typename_byte }}>(data_[offset_bytes] | first_byte_temp);
return {};
}

VoidResult bitspan::padAndMoveToAlignment({{ typename_unsigned_bit_length }} n_bits){
const auto padding = static_cast<uint8_t>(n_bits - offset_misalignment(n_bits));
if (padding != n_bits) // Pad to n_bits bits. TODO: Eliminate redundant padding checks.
{
{{ assert('padding > 0') }}
auto ref_result = setZeros(padding);
if(not ref_result){
return ref_result;
}
add_offset(padding);
{{ assert('offset_alings_to(n_bits)') }}
}
return {};
}
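
A short worked example of the padding logic above. The two-argument bitspan constructor (buffer plus starting bit offset) is inferred from the base-class constructor in this diff and should be treated as an assumption.

#include <cstdint>

// Illustrative only: start with the write offset at 12 bits and pad to a byte boundary.
//   offset_misalignment(8U) == 12 % 8 == 4, so padding == 8 - 4 == 4;
//   setZeros(4) clears the next four bits and add_offset(4) moves the offset to 16.
inline void pad_example()
{
    uint8_t buf[4] = {0};
    nunavut::support::bitspan out{ {buf, sizeof(buf)}, 12U };
    const auto result = out.padAndMoveToAlignment(8U);
    // On success, out.offset() == 16U and out.offset_alings_to_byte() returns true.
    (void)result;
}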


uint8_t const_bitspan::getU8(const uint8_t len_bits) const noexcept
{
{{ assert('data_.data() != nullptr') }}
@@ -425,11 +513,12 @@ uint32_t const_bitspan::getU32(const uint8_t len_bits) const noexcept
return val;
{%- elif options.target_endianness in ('any', 'big') %}
uint8_t tmp[sizeof(uint32_t)] = {0};
copyTo(bitspan{ { &tmp[0], sizeof(tmp) } }, bits);
return static_cast<uint32_t>(static_cast<uint32_t>(tmp[0]) |
(static_cast<uint32_t>(tmp[1]) << 8U) |
(static_cast<uint32_t>(tmp[2]) << 16U) |
(static_cast<uint32_t>(tmp[3]) << 24U));
copyTo(bitspan{ { tmp, sizeof(tmp) } }, bits);
return static_cast<uint32_t>(
(static_cast<uint32_t>(tmp[0])) |
(static_cast<uint32_t>(tmp[1]) << 8U) |
(static_cast<uint32_t>(tmp[2]) << 16U) |
(static_cast<uint32_t>(tmp[3]) << 24U));
{%- else %}{%- assert False %}
{%- endif %}
}
@@ -445,7 +534,7 @@ uint64_t const_bitspan::getU64(const uint8_t len_bits) const noexcept
return val;
{%- elif options.target_endianness in ('any', 'big') %}
uint8_t tmp[sizeof(uint64_t)] = {0};
copyTo(bitspan{ { &tmp[0], sizeof(tmp) } }, bits);
copyTo(bitspan{ { tmp, sizeof(tmp) } }, bits);
return static_cast<uint64_t>(static_cast<uint64_t>(tmp[0]) |
(static_cast<uint64_t>(tmp[1]) << 8U) |
(static_cast<uint64_t>(tmp[2]) << 16U) |
28 changes: 25 additions & 3 deletions src/nunavut/lang/cpp/templates/_composite_type.j2
@@ -39,6 +39,22 @@
/// This type does not have a fixed port-ID. See https://forum.uavcan.org/t/choosing-message-and-service-ids/889
static constexpr bool HasFixedPortID = false;
{% endif -%}
{%- assert composite_type.extent % 8 == 0 %}
{%- assert composite_type.inner_type.extent % 8 == 0 %}
/// Extent is the minimum amount of memory required to hold any serialized representation of any compatible
version of the data type; or, in other words, it is the maximum possible size of received objects of this type.
/// The size is specified in bytes (rather than bits) because by definition, extent is an integer number of bytes long.
/// When allocating a deserialization (RX) buffer for this data type, it should be at least extent bytes large.
/// When allocating a serialization (TX) buffer, it is safe to use the size of the largest serialized representation
/// instead of the extent because it provides a tighter bound of the object size; it is safe because the concrete type
/// is always known during serialization (unlike deserialization). If not sure, use extent everywhere.

static constexpr {{ typename_unsigned_length }} EXTENT_BYTES = {#- -#}
{{ composite_type.extent // 8 }}UL;
static constexpr {{ typename_unsigned_length }} SERIALIZATION_BUFFER_SIZE_BYTES = {#- -#}
{{ composite_type.inner_type.extent // 8 }}UL;
static_assert(EXTENT_BYTES >= SERIALIZATION_BUFFER_SIZE_BYTES, "Internal constraint violation");
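
The two constants above encode the rule spelled out in the comment: size RX buffers by the extent and TX buffers by the (tighter) serialized size. The following compilable sketch uses stand-in values, since the real numbers come from each generated type.

#include <array>
#include <cstddef>
#include <cstdint>

// Stand-in for a generated type; the concrete values here are illustrative only.
struct MyMessage_1_0
{
    static constexpr std::size_t EXTENT_BYTES = 64UL;
    static constexpr std::size_t SERIALIZATION_BUFFER_SIZE_BYTES = 48UL;
};

int main()
{
    // A deserialization (RX) buffer must hold any compatible version of the type:
    std::array<std::uint8_t, MyMessage_1_0::EXTENT_BYTES> rx_buffer{};
    // A serialization (TX) buffer only needs the bound for the concrete type being sent:
    std::array<std::uint8_t, MyMessage_1_0::SERIALIZATION_BUFFER_SIZE_BYTES> tx_buffer{};
    (void)rx_buffer;
    (void)tx_buffer;
    return 0;
}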

{%- for constant in composite_type.constants %}
{% if loop.first %}
// +---------------------------------------------------------------------------------------------------------------+
@@ -60,11 +76,17 @@
{%- if not nunavut.support.omit %}

nunavut::support::SerializeResult
serialize(nunavut::support::span<{{ typename_byte }}> out_buffer) const
serialize(nunavut::support::bitspan out_buffer) const
{
(void)out_buffer;
{% from 'serialization.j2' import serialize -%}
{{ serialize(composite_type) | trim }}
{{ serialize(composite_type) | trim | remove_blank_lines | indent }}
}

nunavut::support::SerializeResult
deserialize(nunavut::support::const_bitspan in_buffer)
{
{% from 'deserialization.j2' import deserialize -%}
{{ deserialize(composite_type) | trim | remove_blank_lines | indent }}
}
{%- endif %}
}{{ composite_type | definition_end }}
12 changes: 12 additions & 0 deletions src/nunavut/lang/cpp/templates/_definitions.j2
@@ -0,0 +1,12 @@
{%- macro assert(expression) -%}
{%- if options.enable_serialization_asserts -%}
NUNAVUT_ASSERT({{ expression }});
{%- endif -%}
{%- endmacro -%}

{% if options.target_endianness == 'little' %}
{% set LITTLE_ENDIAN = True %}
{% elif options.target_endianness in ('any', 'big') %}
{% set LITTLE_ENDIAN = False %}
{% else %}{% assert False %}
{% endif %}