Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[mpm] Add ParticleSorter #22457

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 41 additions & 0 deletions multibody/mpm/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ drake_cc_package_library(
":bspline_weights",
":grid_data",
":particle_data",
":particle_sorter",
":spgrid_flags",
],
)

Expand Down Expand Up @@ -60,6 +62,22 @@ drake_cc_library(
],
)

drake_cc_library(
name = "particle_sorter",
srcs = [
"particle_sorter.cc",
],
hdrs = [
"particle_sorter.h",
],
deps = [
":bspline_weights",
":spgrid_flags",
"//common:essential",
"//math:gradient",
],
)

# TODO(xuchenhan-tri): when we enable SPGrid in our releases, we also need to
# install its license file in drake/tools/workspace/BUILD.bazel.

Expand All @@ -72,11 +90,24 @@ drake_cc_library_linux_only(
"spgrid.h",
],
deps = [
":spgrid_flags",
"//common:essential",
"@spgrid_internal",
],
)

drake_cc_library(
name = "spgrid_flags",
hdrs = [
"spgrid_flags.h",
],
deps = [
":bspline_weights",
"//common:essential",
"//math:gradient",
],
)

drake_cc_googletest(
name = "bspline_weights_test",
deps = [
Expand All @@ -99,6 +130,16 @@ drake_cc_googletest(
],
)

drake_cc_googletest_linux_only(
name = "particle_sorter_test",
deps = [
":bspline_weights",
":grid_data",
":particle_sorter",
":spgrid",
],
)

drake_cc_googletest_linux_only(
name = "spgrid_test",
deps = [
Expand Down
21 changes: 4 additions & 17 deletions multibody/mpm/grid_data.h
Original file line number Diff line number Diff line change
Expand Up @@ -108,33 +108,20 @@ struct GridData {
static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
"T must be float or double.");

/* Resets `this` GridData to its default state where all floating point values
are set to NAN and the index is inactive. */
/* Resets all floating point data to zero and the index to be inactive. */
void reset() { *this = {}; }

/* Returns true iff `this` GridData is bit-wise equal to `other`. */
bool operator==(const GridData<T>& other) const {
return std::memcmp(this, &other, sizeof(GridData<T>)) == 0;
}

Vector3<T> v{Vector3<T>::Constant(nan_with_all_bits_set())};
T m{nan_with_all_bits_set()};
Vector3<T> scratch{Vector3<T>::Constant(nan_with_all_bits_set())};
Vector3<T> v{Vector3<T>::Zero()};
T m{0};
Vector3<T> scratch{Vector3<T>::Zero()};
std::conditional_t<std::is_same_v<T, float>, IndexOrFlag<int32_t>,
IndexOrFlag<int64_t>>
index_or_flag{};

private:
/* Returns a floating point NaN value with all bits set to one. This choice
makes the reset() function more efficient. In particular, it allows the
generated machine code to memset all bits to 1 instead of handling each field
individually. */
static T nan_with_all_bits_set() {
using IntType =
std::conditional_t<std::is_same_v<T, float>, int32_t, int64_t>;
constexpr IntType kAllBitsOn = -1;
return drake::internal::bit_cast<T>(kAllBitsOn);
}
};

/* With T = float, GridData is expected to be 32 bytes. With T = double,
Expand Down
99 changes: 99 additions & 0 deletions multibody/mpm/particle_sorter.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
#include "drake/multibody/mpm/particle_sorter.h"

namespace drake {
namespace multibody {
namespace mpm {
namespace internal {

/* Converts a sorted vector of offsets (e.g. sentinel particle indices) into
 a vector of consecutive [start, end) ranges: {a, b, c} becomes
 {{a, b}, {b, c}}. An input with fewer than two entries produces no ranges. */
void ConvertToRangeVector(const std::vector<int>& data, RangeVector* ranges) {
  ranges->clear();
  /* Guard the empty case: `data.size() - 1` would underflow the unsigned
   size_t and make reserve() request a near-2^64 allocation. */
  if (data.empty()) return;
  ranges->reserve(data.size() - 1);
  for (int i = 1; i < ssize(data); ++i) {
    ranges->push_back({data[i - 1], data[i]});
  }
}

/* Returns the SpGrid offset of the base node of the first (sentinel) particle
 in each active block. The last entry of `sentinel_particles_` is a
 past-the-end terminator, so there is one offset per active block. */
std::vector<uint64_t> ParticleSorter::GetActiveBlockOffsets() const {
  const int num_blocks = ssize(sentinel_particles_) - 1;
  std::vector<uint64_t> result;
  result.reserve(num_blocks);
  for (int block = 0; block < num_blocks; ++block) {
    result.push_back(base_node_offsets_[sentinel_particles_[block]]);
  }
  return result;
}

/* Prepares the sorter's internal buffers for sorting `num_particles`
 particles, discarding the results of any previous sort. */
void ParticleSorter::Initialize(int num_particles) {
  data_indices_.resize(num_particles);
  base_node_offsets_.resize(num_particles);
  particle_sorters_.resize(num_particles);
  sentinel_particles_.clear();
  /* Reset all 2^3 = 8 color buckets used by the block coloring. */
  for (auto& indices : ranges_indices_) {
    indices.clear();
  }
  for (auto& ranges : colored_ranges_) {
    ranges.clear();
  }
}

/* Caches the bit-layout quantities derived from the SpGrid flags and checks
 that `num_particles` indices fit in the bits left over after the base node
 offsets are packed. */
void ParticleSorter::ComputeBitInformation(const SpGridFlags& flags,
                                           int num_particles) {
  /* Recall that all base node offsets in SPGrid have the following bit
   representation:

       page_bits | block_bits | data_bits

   All the data bits are zero, guaranteed by SPGrid. In addition, a
   few bits (starting from the left) in the page bits are also zero (we
   explain why below). Therefore, we can truncate the zero bits in the
   base_node_offset and squeeze in the original particle index into the lower
   bits of a single 64 bit unsigned integer.

   Q: Why are the left most bits in page_bits zero?

   A: Since we cap the total number of allowed grid nodes. There are at most
   2^(3*log2_max_grid_size) number of grid nodes in the grid, which requires
   3*log2_max_grid_size bits to represent. SPGrid uses page_bits and
   block_bits to represent the grid nodes (and data_bits are used, well, to
   store data). The page_bits and block_bits have (64 - data_bits) in total,
   and only 3*log2_max_grid_size bits are used, so the left most
   (64 - data_bits - 3*log2_max_grid_size) bits are zero.

   Q: So how many zeros can we truncate in the bits for base_node_offsets to
   make room for the particle indices? And is it enough room?

   A: There are (64 - data_bits - 3*log2_max_grid_size) zeros on the left and
   data_bits zeros on the right, so they add up to (64 - 3*log2_max_grid_size)
   zeros in total. With a typical log2_max_grid_size == 10, we have
   (64 - 3*10) = 34 bits to work with, which is more than enough to store the
   32 bit particle indices.

   Therefore, the overall strategy is to left shift the base node offset by
   (64 - data_bits - 3*log2_max_grid_size), which leaves the lower
   (64 - 3*log2_max_grid_size) bits to be zero. We then use the right most 32
   bits to store the original index of the particles. */
  log2_max_grid_size_ = flags.log2_max_grid_size;
  data_bits_ = flags.data_bits;
  log2_page_ = flags.log2_page;
  zero_page_bits_ = 64 - data_bits_ - 3 * log2_max_grid_size_;
  index_bits_ = 64 - 3 * log2_max_grid_size_;  // = zero_page_bits_ + data_bits_
  // TODO(xuchenhan-tri): This should be checked when we register particles to
  // the MPM model.
  /* Confirm that index_bits_ is enough to store num_particles. Strict `>` is
   required because the all-ones index pattern would collide with the mask used
   when unpacking. NOTE(review): assumes index_bits_ < 64 (i.e.
   log2_max_grid_size > 0); a 64-bit shift would be UB — confirm flags are
   validated upstream. */
  const uint64_t capacity = (uint64_t(1) << index_bits_);
  DRAKE_DEMAND(capacity > static_cast<uint64_t>(num_particles));
}

void ParticleSorter::UnpackResults() {
const uint64_t index_mask = ((uint64_t(1) << index_bits_) - 1);

for (int p = 0; p < static_cast<int>(particle_sorters_.size()); ++p) {
/* Lower bits store the particle's original index. */
data_indices_[p] = (particle_sorters_[p] & index_mask);

/* Upper bits store the base_node_offset (shifted); shift back & re-apply
data_bits_. */
base_node_offsets_[p] = (particle_sorters_[p] >> index_bits_) << data_bits_;
}
}

} // namespace internal
} // namespace mpm
} // namespace multibody
} // namespace drake
Loading