Merge pull request #23 from stuarteberg/fix-smoothing-off-by-one
Fix off-by-one error in laplacian smoothing
k-dominik authored Jun 23, 2020
2 parents baee6ef + 58999c2 commit 12def75
Showing 6 changed files with 162 additions and 9 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -1,2 +1,3 @@
 build/
-*.pyd
+*.pyd
+__pycache__/
1 change: 1 addition & 0 deletions conda-recipe/meta.yaml
@@ -36,6 +36,7 @@ test:
     - marching_cubes
   requires:
     - pytest
+    - pandas
   source_files:
     - test/*
   commands:
2 changes: 0 additions & 2 deletions src/laplacian_smoothing.cpp
@@ -94,8 +94,6 @@ void smooth(Mesh& mesh, unsigned int rounds)
         swap(normals, new_norms);
         swap(vertices, new_verts);
     }
-    swap(normals, new_norms);
-    swap(vertices, new_verts);
    delete[] new_norms;
    delete[] new_verts;
    mesh.normals = normals;
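For context: the loop body already swaps the double buffers at the end of every round, so after the final iteration the newest smoothed data is already in vertices/normals. The extra post-loop swap that this PR deletes handed back the buffers from one round earlier. A minimal Python sketch of the corrected double-buffering pattern (illustrative only; the names here are not from the C++ source):

    def smooth(values, rounds):
        # Two buffers: `current` holds the latest result, `scratch` is workspace.
        current = list(values)
        scratch = [0.0] * len(values)
        for _ in range(rounds):
            for i, v in enumerate(current):
                # Toy smoothing step: average each value with its two neighbors.
                lo, hi = max(i - 1, 0), min(i + 1, len(current) - 1)
                scratch[i] = (current[lo] + v + current[hi]) / 3
            # Swap so that `current` holds this round's result.
            current, scratch = scratch, current
        # Swapping once more here (as the deleted C++ lines did) would return
        # the buffer from one round earlier -- the off-by-one this PR fixes.
        return current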
Binary file modified test/data/reference/sample_mesh_smoothed4.npz
89 changes: 89 additions & 0 deletions test/laplacian_smooth.py
@@ -0,0 +1,89 @@
import numpy as np
import pandas as pd


def laplacian_smooth(vertices, faces, rounds=1):
    """
    Pure-python reference implementation of laplacian smoothing.
    Returns a smoothed copy of the mesh vertices.

    This is the simplest mesh smoothing technique, known as Laplacian
    smoothing: relocate each vertex by averaging its position with those
    of its adjacent neighbors, and repeat for N iterations.

    One disadvantage of this technique is that it results in overall
    shrinkage of the mesh, especially after many iterations. (But nearly
    all smoothing techniques cause at least some shrinkage.)

    (Obviously, any normals you have for this mesh will be out-of-date
    after smoothing.)

    Args:
        vertices:
            Vertex coordinates, shape=(N,3)

        faces:
            Face definitions, shape=(M,3).
            Each row lists 3 vertices (indexes into the vertices array).

        rounds:
            How many passes to take over the data.
            More iterations result in a smoother mesh, but also more
            shrinkage (and more CPU time).

    Returns:
        new vertices

    Note:
        Smoothing can cause degenerate faces, particularly in some
        small special cases like this:

              1        1
             / \       |
            2---3 ==>  X   (where X is occupied by both 2 and 3)
             \ /       |
              4        4

        (Such meshes are not usually produced by marching cubes, though.)
    """
    vertices = np.asarray(vertices, dtype=np.float32)
    faces = np.asarray(faces)

    # Compute the list of all unique vertex adjacencies
    all_edges = np.concatenate( [faces[:,(0,1)],
                                 faces[:,(1,2)],
                                 faces[:,(2,0)]] )
    all_edges.sort(axis=1)
    edges_df = pd.DataFrame( all_edges, columns=['v1_id', 'v2_id'] )
    edges_df.drop_duplicates(inplace=True)
    del all_edges

    # (This sort isn't technically necessary, but it might give
    # better cache locality for the vertex lookups below.)
    edges_df.sort_values(['v1_id', 'v2_id'], inplace=True)

    # How many neighbors each vertex has == how many times it is
    # mentioned in the edge list
    neighbor_counts = np.bincount(edges_df.values.reshape(-1), minlength=len(vertices))

    new_vertices = np.empty_like(vertices)
    for _ in range(rounds):
        new_vertices[:] = vertices

        # For the complete edge index list, accumulate (sum) the vertices on
        # the right side of the list into the left side's address and vice-versa.
        #
        ## We want something like this:
        # v1_indexes, v2_indexes = df['v1_id'], df['v2_id']
        # new_vertices[v1_indexes] += vertices[v2_indexes]
        # new_vertices[v2_indexes] += vertices[v1_indexes]
        #
        # ...but that doesn't work because v1_indexes will contain repeats,
        # and "fancy indexing" behavior is undefined in that case.
        #
        # Instead, it turns out that np.ufunc.at() works (it's an "unbuffered" operation)
        np.add.at(new_vertices, edges_df['v1_id'], vertices[edges_df['v2_id'], :])
        np.add.at(new_vertices, edges_df['v2_id'], vertices[edges_df['v1_id'], :])

        # (plus one here because each point itself is also included in the sum)
        new_vertices[:] /= (neighbor_counts[:,None] + 1)

        # Swap (save RAM allocation overhead by reusing the new_vertices
        # array between iterations)
        vertices, new_vertices = new_vertices, vertices

    return vertices
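As the comments in the loop above note, new_vertices[idx] += ... silently drops contributions when idx contains repeated indices, while np.add.at applies every one. A tiny demonstration of the difference (illustrative only, not part of this test suite):

    import numpy as np

    acc = np.zeros(3)
    idx = np.array([0, 0, 1])       # index 0 appears twice

    acc[idx] += 1                   # buffered fancy indexing: the writes collide
    print(acc)                      # [1. 1. 0.] -- one increment was lost

    acc = np.zeros(3)
    np.add.at(acc, idx, 1)          # unbuffered: every increment is applied
    print(acc)                      # [2. 1. 0.]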
76 changes: 70 additions & 6 deletions test/test_marching_regression.py
@@ -1,21 +1,22 @@
 import marching_cubes
-import numpy
+import numpy as np
 import pytest
 import os
 
+from laplacian_smooth import laplacian_smooth
 
 @pytest.fixture
 def volume():
     vol_path = os.path.join(os.path.split(__file__)[0], "data/input/sample.npy")
-    return numpy.load(vol_path)
+    return np.load(vol_path)
 
 
 @pytest.fixture
 def mesh_loader():
     """Load mesh data from npz file"""
 
     def _loader(mesh_file_name):
-        data = numpy.load(mesh_file_name)
+        data = np.load(mesh_file_name)
         vertices = data["vertices"]
         normals = data["normals"]
         faces = data["faces"]
@@ -36,6 +37,69 @@ def test_regression(volume, mesh_loader, smoothing, reference_mesh_file):
 
     ref_vertices, ref_normals, ref_faces = mesh_loader(reference_mesh_file)
 
-    numpy.testing.assert_array_almost_equal(vertices, ref_vertices)
-    numpy.testing.assert_array_almost_equal(normals, ref_normals)
-    numpy.testing.assert_array_almost_equal(faces, ref_faces)
+    np.testing.assert_array_almost_equal(vertices, ref_vertices)
+    np.testing.assert_array_almost_equal(normals, ref_normals)
+    assert (faces == ref_faces).all()
+
+
+def test_smoothing(volume):
+    ROUNDS = 3
+    vertices, normals, faces = marching_cubes.march(volume, 0)
+    smoothed_vertices, smoothed_normals, smoothed_faces = marching_cubes.march(volume, ROUNDS)
+
+    # Compare with our reference implementation of laplacian smoothing.
+    ref_smoothed_vertices = laplacian_smooth(vertices, faces, ROUNDS)
+    assert np.allclose(smoothed_vertices, ref_smoothed_vertices, rtol=0.001)
+
+    assert (faces == smoothed_faces).all(), \
+        "Smoothing should not affect face definitions."
+
+    assert not (normals == smoothed_normals).all(), \
+        "Normals should not be the same after smoothing."
+
+
+def test_reference_smoothing_trivial():
+    """
+    A simple test of our laplacian_smooth reference function.
+    """
+    vertices = np.array([[0.0, 0.0, 0.0],
+                         [0.0, 0.0, 1.0],
+                         [0.0, 0.0, 2.0]])
+
+    # This "face" is actually a straight line,
+    # which makes it easy to see what's going on.
+    faces = np.array([[0,1,2]])
+    average_vertex = vertices.sum(axis=0) / 3
+    vertices = laplacian_smooth(vertices, faces, 1)
+    assert (vertices == average_vertex).all()
+
+
+def test_reference_smoothing_hexagon():
+    """
+    Another simple test of our laplacian_smooth reference function:
+    'smooth' a simple 2D hexagon, which is an easy case to understand.
+    """
+    # This map is correctly labeled with the vertex indices
+    _ = -1
+    hexagon = [[[_,_,_,_,_,_,_],
+                [_,_,0,_,1,_,_],
+                [_,_,_,_,_,_,_],
+                [_,2,_,3,_,4,_],
+                [_,_,_,_,_,_,_],
+                [_,_,5,_,6,_,_],
+                [_,_,_,_,_,_,_]]]
+
+    hexagon = 1 + np.array(hexagon)
+    original_vertices = np.transpose(hexagon.nonzero())
+    faces = [[3,1,4],
+             [3,4,6],
+             [3,6,5],
+             [3,5,2],
+             [3,2,0],
+             [3,0,1]]
+
+    vertices = laplacian_smooth(original_vertices, faces, 1)
+
+    # Since vertex 3 is exactly centered between the rest,
+    # its location never changes.
+    assert (vertices[3] == original_vertices[3]).all()