#!/usr/bin/env python
from numpy import *
from math import acos
import itertools
def recovery1( mesh ):
## 1 Take only front-facing triangles of the mesh.
## 2 Fix the boundaries.
## 3 Attach normals to interior vertices.
## TODO Q: Why not attach to faces?
## A1: Then I would need a Laplace operator on faces, which is awkward when my boundaries are edges.
## 4 Solve a poisson system for values at vertices.
## 1
from trimesh import TriMesh
output = TriMesh()
vmap = [-1] * len( mesh.vs )
vcount = 0
output_face2mesh_face = dict()
for fi, face in enumerate( mesh.faces ):
if mesh.face_normals[ fi ][2] < 0.: continue
for v in face:
if vmap[ v ] == -1:
vmap[ v ] = vcount
vcount += 1
output.vs.append( mesh.vs[v] )
output_face2mesh_face[ len( output.faces ) ] = fi
output.faces.append( tuple( [ vmap[v] for v in face ] ) )
output.vs = asarray( output.vs )
## 2
'''
boundary_verts = set()
boundary_vert_normal_sum = {}
boundary_vert_num_normals = {}
for (i,j) in output.boundary_edges():
boundary_verts.add( i )
boundary_verts.add( j )
## The vector from vertex i to vertex j.
edge_vec = asarray( output.vs[j] ) - asarray( output.vs[i] )
## Pretend it's a perfectly aligned silhouette edge, as in our input data.
edge_vec[2] = 0.
edge_vec *= 1./sqrt(dot( edge_vec, edge_vec ))
## 90 degree clockwise turn
n = array( [ edge_vec[1], -edge_vec[0], 0. ] )
boundary_vert_normal_sum[i] = boundary_vert_normal_sum.setdefault( i, zeros( 3 ) ) + n
boundary_vert_normal_sum[j] = boundary_vert_normal_sum.setdefault( j, zeros( 3 ) ) + n
boundary_vert_num_normals[i] = boundary_vert_num_normals.setdefault( i, 0 ) + 1
boundary_vert_num_normals[j] = boundary_vert_num_normals.setdefault( j, 0 ) + 1
assert sorted( boundary_vert_normal_sum.keys() ) == sorted( boundary_vert_num_normals.keys() )
assert set( boundary_vert_normal_sum.keys() ) == boundary_verts
assert len( boundary_vert_normal_sum ) == len( boundary_verts )
'''
## Solve for 'output' in-place and return 'output' as well.
#output = solve_nonlinear( output, [ ( ofi, mesh.face_normals[ mfi ] ) for ofi, mfi in output_face2mesh_face.iteritems() ] )
output = solve_linear( output, [ ( ofi, mesh.face_normals[ mfi ] ) for ofi, mfi in output_face2mesh_face.items() ] )
output.write_OBJ( 'output.obj' )
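## A hypothetical usage sketch (the mesh type and its loader live in the companion
## 'trimesh' module and are assumed here, not defined in this file):
##   mesh = ...  ## a TriMesh with .vs, .faces, and .face_normals populated
##   recovery1( mesh )  ## writes the recovered front-facing surface to 'output.obj'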
def zero_dx_dy_constraints_from_cut_edges( cut_edges ):
'''
Given a parameter 'cut_edges' suitable for passing to
'gen_symmetric_grid_laplacian()',
returns two values ( dx_constraints, dy_constraints ) suitable for
passing to 'solve_grid_linear()' that enforce derivatives of zero across
the edges in 'cut_edges'. This allows 'solve_grid_linear()' to solve
for a continuous surface with discontinuous derivatives.
tested
'''
dx_constraints = []
dy_constraints = []
for ( i, j ), ( k, l ) in cut_edges:
assert i == k or j == l
assert abs( i - k ) + abs( j - l ) == 1
if 0 != i - k:
dx_constraints.append( ( min( i, k ), j, 0. ) )
else:
dy_constraints.append( ( i, min( j, l ), 0. ) )
assert len( dx_constraints ) + len( dy_constraints ) == len( cut_edges )
return dx_constraints, dy_constraints
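## A small worked example (a sketch of the behavior above, not from the original code):
## the vertical cut edge ( (1,2), (2,2) ) differs in i and yields one dx constraint,
## while the horizontal cut edge ( (1,2), (1,3) ) differs in j and yields one dy constraint:
##   zero_dx_dy_constraints_from_cut_edges( [ ( (1,2), (2,2) ), ( (1,2), (1,3) ) ] )
##   ## -> ( [ (1, 2, 0.) ], [ (1, 2, 0.) ] )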
def cut_edges_from_mask( mask ):
'''
Given a boolean 2D array 'mask', returns 'cut_edges' suitable for passing
to 'gen_symmetric_grid_laplacian()', where each horizontal or vertical
transition between True and False entries becomes a cut edge.
tested
(see also test_cut_edges())
'''
mask = asarray( mask, dtype = bool )
cut_edges = []
horiz = mask[:-1,:] != mask[1:,:]
for i,j in zip( *where( horiz ) ):
cut_edges.append( ( ( i,j ), ( i+1, j ) ) )
del horiz
vert = mask[:,:-1] != mask[:,1:]
for i,j in zip( *where( vert ) ):
cut_edges.append( ( ( i,j ), ( i, j+1 ) ) )
del vert
return cut_edges
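## A small worked example (a sketch, not from the original code): for the 2-by-2 mask
##   mask = [ [ True, False ],
##            [ True, False ] ]
## only the column transition produces cut edges, one per row:
##   cut_edges_from_mask( mask )
##   ## -> [ ( (0,0), (0,1) ), ( (1,0), (1,1) ) ]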
def assert_valid_cut_edges( rows, cols, cut_edges ):
assert rows > 0 and cols > 0
if len( cut_edges ) > 0:
## cut edge indices must both be inside the grid
def in_grid( i,j ): return i >= 0 and i < rows and j >= 0 and j < cols
assert all([ in_grid( i,j ) and in_grid( k,l ) for (i,j), (k,l) in cut_edges ])
## cut edges must be horizontal or vertical neighbors
assert all([ abs( i - k ) + abs( j - l ) == 1 for (i,j), (k,l) in cut_edges ])
## cut edges must be unique
assert len( frozenset([ ( tuple( ij ), tuple( kl ) ) for ij, kl in cut_edges ]) ) == len( cut_edges )
def gen_symmetric_grid_laplacian1( rows, cols, cut_edges = None ):
'''
Returns a Laplacian operator matrix for a grid of dimensions 'rows' by 'cols'.
The matrix is symmetric and normalized such that the diagonal values of interior vertices equal 1.
Optional parameter 'cut_edges', a sequence of 2-tuples ( (i,j), (k,l) )
where (i,j) and (k,l) must be horizontal or vertical neighbors in the grid,
specifies edges in the grid which should be considered to be disconnected.
tested
(see also test_cut_edges())
'''
assert rows > 0
assert cols > 0
if cut_edges is None: cut_edges = []
assert_valid_cut_edges( rows, cols, cut_edges )
from scipy import sparse
N = rows
M = cols
def ind2ij( ind ):
assert ind >= 0 and ind < N*M
return ind // M, ind % M
def ij2ind( i,j ):
assert i >= 0 and i < N and j >= 0 and j < M
return i*M + j
Adj = []
for i in range( 0, rows ):
for j in range( 0, cols ):
ind00 = ij2ind( i,j )
## If I wanted to remove these conditionals, I could do
## one loop for 1...-1 and another for 0 and another for -1.
## I could also only add adjacencies in positive coordinate
## directions and then do "AdjM += AdjM.T" to get symmetry (the negative direction).
if i+1 < rows:
indp0 = ij2ind( i+1,j )
Adj.append( ( ind00, indp0 ) )
if j+1 < cols:
ind0p = ij2ind( i,j+1 )
Adj.append( ( ind00, ind0p ) )
if i > 0:
indm0 = ij2ind( i-1,j )
Adj.append( ( ind00, indm0 ) )
if j > 0:
ind0m = ij2ind( i,j-1 )
Adj.append( ( ind00, ind0m ) )
## Build the adjacency matrix.
AdjMatrix = sparse.coo_matrix( ( ones( len( Adj ) ), asarray( Adj ).T ), shape = ( rows*cols, rows*cols ) )
#AdjMatrix = AdjMatrix.tocsr()
## Build the adjacency matrix representing cut edges and subtract it
if len( cut_edges ) > 0:
CutAdj = []
for ij, kl in cut_edges:
CutAdj.append( ( ij2ind( *ij ), ij2ind( *kl ) ) )
CutAdj.append( ( ij2ind( *kl ), ij2ind( *ij ) ) )
CutAdjMatrix = sparse.coo_matrix( ( ones( len( CutAdj ) ), asarray( CutAdj ).T ), shape = ( rows*cols, rows*cols ) )
## Update AdjMatrix
AdjMatrix = AdjMatrix - CutAdjMatrix
'''
## One over mass
ooMass = sparse.identity( rows*cols )
ooMass.setdiag( 1./asarray(AdjMatrix.sum(1)).ravel() )
## NOTE: ooMass*AdjMatrix isn't symmetric because of boundaries!!!
L = sparse.identity( rows*cols ) - ooMass * AdjMatrix
'''
## This formulation is symmetric: each vertex has a consistent weight
## according to its area (meaning boundary vertices have smaller
## weights than interior vertices).
## NOTE: I tried sparse.dia_matrix(), but sparse.dia_matrix.setdiag() fails with an error saying that dia_matrix doesn't support element assignment.
## UPDATE: setdiag() seems to just be generally slow. coo_matrix is fast!
#Mass = sparse.lil_matrix( ( rows*cols, rows*cols ) )
#Mass.setdiag( asarray(AdjMatrix.sum(1)).ravel() )
#debugger()
Mass = sparse.coo_matrix( ( asarray(AdjMatrix.sum(1)).ravel(), ( list(range( rows*cols)), list(range( rows*cols)) ) ) )
L = .25 * ( Mass - AdjMatrix )
#debugger()
## The rows should sum to 0.
assert ( abs( asarray( L.sum(1) ).ravel() ) < 1e-5 ).all()
## The columns should also sum to 0, since L is symmetric.
assert ( abs( asarray( L.sum(0) ).ravel() ) < 1e-5 ).all()
## It should be symmetric.
assert len( ( L - L.T ).nonzero()[0] ) == 0
return L
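## A minimal usage sketch (not from the original code): inspect the operator for a tiny grid,
##   L = gen_symmetric_grid_laplacian1( 3, 3 )
##   print( L.todense() )
## For a 3-by-3 grid the single interior vertex (1,1) gets a diagonal of 1
## (four neighbors times the .25 weight), while corner vertices get .5 and edge vertices .75.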
def gen_symmetric_grid_laplacian2( rows, cols, cut_edges = None ):
'''
The same as 'gen_symmetric_grid_laplacian1()', except the boundary weights are correct:
boundary edges get half the interior edge weight, so each vertex's diagonal reflects its cell area.
tested
(see also test_cut_edges())
'''
assert rows > 0
assert cols > 0
if cut_edges is None: cut_edges = []
assert_valid_cut_edges( rows, cols, cut_edges )
from scipy import sparse
N = rows
M = cols
def ind2ij( ind ):
assert ind >= 0 and ind < N*M
return ind // M, ind % M
def ij2ind( i,j ):
assert i >= 0 and i < N and j >= 0 and j < M
return i*M + j
Adj = []
AdjValues = []
## The middle (lacking the first and last columns) strip down
## to the bottom, not including the bottom row.
for i in range( 0, rows-1 ):
for j in range( 1, cols-1 ):
ind00 = ij2ind( i,j )
indp0 = ij2ind( i+1,j )
Adj.append( ( ind00, indp0 ) )
AdjValues.append( .25 )
## The first and last columns down to the bottom,
## not including the bottom row.
for i in range( 0, rows-1 ):
for j in ( 0, cols-1 ):
ind00 = ij2ind( i,j )
indp0 = ij2ind( i+1,j )
Adj.append( ( ind00, indp0 ) )
AdjValues.append( .125 )
## The middle (lacking the first and last rows) strip to
## the right, not including the last column.
for i in range( 1, rows-1 ):
for j in range( 0, cols-1 ):
ind00 = ij2ind( i,j )
ind0p = ij2ind( i,j+1 )
Adj.append( ( ind00, ind0p ) )
AdjValues.append( .25 )
## The first and last rows over to the right,
## not including the right-most column.
for i in ( 0, rows-1 ):
for j in range( 0, cols-1 ):
ind00 = ij2ind( i,j )
ind0p = ij2ind( i,j+1 )
Adj.append( ( ind00, ind0p ) )
AdjValues.append( .125 )
## Build the adjacency matrix.
AdjMatrix = sparse.coo_matrix( ( AdjValues, asarray( Adj ).T ), shape = ( rows*cols, rows*cols ) )
## We have so far only counted right and downward edges.
## Add left and upward edges by adding the transpose.
AdjMatrix = AdjMatrix.T + AdjMatrix
#AdjMatrix = AdjMatrix.tocsr()
## Build the adjacency matrix representing cut edges and subtract it
if len( cut_edges ) > 0:
CutAdj = []
for ij, kl in cut_edges:
CutAdj.append( ( ij2ind( *ij ), ij2ind( *kl ) ) )
CutAdj.append( ( ij2ind( *kl ), ij2ind( *ij ) ) )
CutAdjMatrix = sparse.coo_matrix( ( ones( len( CutAdj ) ), asarray( CutAdj ).T ), shape = ( rows*cols, rows*cols ) )
## Update AdjMatrix.
## We need to subtract the component-wise product of CutAdjMatrix and AdjMatrix
## because AdjMatrix has non-zero values and CutAdjMatrix acts like a mask.
AdjMatrix = AdjMatrix - CutAdjMatrix.multiply( AdjMatrix )
'''
## One over mass
ooMass = sparse.identity( rows*cols )
ooMass.setdiag( 1./asarray(AdjMatrix.sum(1)).ravel() )
## NOTE: ooMass*AdjMatrix isn't symmetric because of boundaries!!!
L = sparse.identity( rows*cols ) - ooMass * AdjMatrix
'''
## This formulation is symmetric: each vertex has a consistent weight
## according to its area (meaning boundary vertices have smaller
## weights than interior vertices).
## NOTE: I tried sparse.dia_matrix(), but sparse.dia_matrix.setdiag() fails with an error saying that dia_matrix doesn't support element assignment.
## UPDATE: setdiag() seems to just be generally slow. coo_matrix is fast!
#Mass = sparse.lil_matrix( ( rows*cols, rows*cols ) )
#Mass.setdiag( asarray(AdjMatrix.sum(1)).ravel() )
#debugger()
Mass = sparse.coo_matrix( ( asarray(AdjMatrix.sum(1)).ravel(), ( list(range( rows*cols)), list(range( rows*cols)) ) ) )
L = ( Mass - AdjMatrix )
## The rows should sum to 0.
assert ( abs( asarray( L.sum(1) ).ravel() ) < 1e-5 ).all()
## The columns should also sum to 0, since L is symmetric.
assert ( abs( asarray( L.sum(0) ).ravel() ) < 1e-5 ).all()
## It should be symmetric.
assert len( ( L - L.T ).nonzero()[0] ) == 0
return L
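## A quick check of the boundary weighting (a sketch, not from the original code):
##   L1 = gen_symmetric_grid_laplacian1( 3, 3 ).todense()
##   L2 = gen_symmetric_grid_laplacian2( 3, 3 ).todense()
## Both give the interior vertex (1,1) a diagonal of 1, but here the corner (0,0)
## gets .25 (its quarter-cell area) rather than the .5 produced by the first formulation.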
def gen_grid_laplacian2_with_boundary_reflection( rows, cols, cut_edges = None ):
'''
The same as 'gen_symmetric_grid_laplacian2()',
except the surface reflects across boundaries.
The resulting matrix is not symmetric, however.
tested
(see also test_cut_edges())
'''
assert rows > 0
assert cols > 0
if cut_edges is None: cut_edges = []
assert_valid_cut_edges( rows, cols, cut_edges )
from scipy import sparse
N = rows
M = cols
def ind2ij( ind ):
assert ind >= 0 and ind < N*M
return ind // M, ind % M
def ij2ind( i,j ):
assert i >= 0 and i < N and j >= 0 and j < M
return i*M + j
Adj = []
AdjValues = []
## NOTE: Unlike gen_symmetric_grid_laplacian2(), we don't use different weights
## for boundary edges, because we continue the surface across the boundary
## by reflection.
## The middle strip down to the bottom, not including the bottom row.
for i in range( 0, rows-1 ):
for j in range( 0, cols ):
ind00 = ij2ind( i,j )
indp0 = ij2ind( i+1,j )
Adj.append( ( ind00, indp0 ) )
AdjValues.append( .25 )
## The middle strip to the right, not including the last column.
for i in range( 0, rows ):
for j in range( 0, cols-1 ):
ind00 = ij2ind( i,j )
ind0p = ij2ind( i,j+1 )
Adj.append( ( ind00, ind0p ) )
AdjValues.append( .25 )
## Build the adjacency matrix.
AdjMatrix = sparse.coo_matrix( ( AdjValues, asarray( Adj ).T ), shape = ( rows*cols, rows*cols ) )
## We have so far only counted right and downward edges.
## Add left and upward edges by adding the transpose.
AdjMatrix = AdjMatrix.T + AdjMatrix
#AdjMatrix = AdjMatrix.tocsr()
## Build the adjacency matrix representing cut edges and subtract it
CutAdj = []
if len( cut_edges ) > 0:
def reflect_a_across_b( a, b ):
## What we want is the step from a to b taken from b:
## ( b - a ) + b = 2*b - a
## NOTE: The caller of this function may be assuming
## that a and b are adjacent (horizontally or vertically);
## this was checked at the beginning of gen_grid_laplacian*().
return ( 2*b[0] - a[0], 2*b[1] - a[1] )
def ij_valid( ij ):
return ij[0] >= 0 and ij[1] >= 0 and ij[0] < rows and ij[1] < cols
for ij, kl in cut_edges:
CutAdj.append( ( ij2ind( *ij ), ij2ind( *kl ) ) )
## Boundary reflection works out to simply cutting
## the edge between ij and the reflection of kl across ij
## from ij's point of view.
## Our matrix will no longer be symmetric :-(.
kl_across_ij = reflect_a_across_b( kl, ij )
if ij_valid( kl_across_ij ):
CutAdj.append( ( ij2ind( *ij ), ij2ind( *kl_across_ij ) ) )
CutAdj.append( ( ij2ind( *kl ), ij2ind( *ij ) ) )
## Boundary reflection works out to simply cutting
## the edge between kl and the reflection of ij across kl
## from kl's point of view.
## Our matrix will no longer be symmetric :-(.
ij_across_kl = reflect_a_across_b( ij, kl )
if ij_valid( ij_across_kl ):
CutAdj.append( ( ij2ind( *kl ), ij2ind( *ij_across_kl ) ) )
## We also add "cut_edges" for the grid boundary.
if rows > 1:
for j in range( 0, cols ):
CutAdj.append( ( ij2ind( 0, j ), ij2ind( 1, j ) ) )
CutAdj.append( ( ij2ind( rows-1, j ), ij2ind( rows-2, j ) ) )
if cols > 1:
for i in range( 0, rows ):
CutAdj.append( ( ij2ind( i, 0 ), ij2ind( i, 1 ) ) )
CutAdj.append( ( ij2ind( i, cols-1 ), ij2ind( i, cols-2 ) ) )
if len( CutAdj ) > 0:
## Cutting reflected edges could lead to some entries appearing multiple times.
## We want to use the entries like a {0,1} mask,
## so make sure each entry appears at most once.
CutAdj = list(set( CutAdj ))
CutAdjMatrix = sparse.coo_matrix( ( ones( len( CutAdj ) ), asarray( CutAdj ).T ), shape = ( rows*cols, rows*cols ) )
## Update AdjMatrix.
## We need to subtract the component-wise product of CutAdjMatrix and AdjMatrix
## because AdjMatrix has non-zero values and CutAdjMatrix acts like a mask.
AdjMatrix = AdjMatrix - CutAdjMatrix.multiply( AdjMatrix )
'''
## One over mass
ooMass = sparse.identity( rows*cols )
ooMass.setdiag( 1./asarray(AdjMatrix.sum(1)).ravel() )
## NOTE: ooMass*AdjMatrix isn't symmetric because of boundaries!!!
L = sparse.identity( rows*cols ) - ooMass * AdjMatrix
'''
## NOTE: Unlike gen_symmetric_grid_laplacian2(), this operator is NOT symmetric:
## boundary reflection (and reflection across cut edges) removes adjacency entries
## from only one side's point of view, and all edges share the same weight.
## NOTE: I tried sparse.dia_matrix(), but sparse.dia_matrix.setdiag() fails with an error saying that dia_matrix doesn't support element assignment.
## UPDATE: setdiag() seems to just be generally slow. coo_matrix is fast!
#Mass = sparse.lil_matrix( ( rows*cols, rows*cols ) )
#Mass.setdiag( asarray(AdjMatrix.sum(1)).ravel() )
#debugger()
Mass = sparse.coo_matrix( ( asarray(AdjMatrix.sum(1)).ravel(), ( list(range( rows*cols)), list(range( rows*cols)) ) ) )
L = ( Mass - AdjMatrix )
## The rows should sum to 0.
assert ( abs( asarray( L.sum(1) ).ravel() ) < 1e-5 ).all()
## The columns need not also sum to 0, since L is not symmetric.
#assert ( abs( asarray( L.sum(0) ).ravel() ) < 1e-5 ).all()
## No row should be empty.
## UPDATE: The corner vertices will be empty.
#assert ( asarray( abs( L ).sum(1) ).ravel() > 1e-5 ).all()
## UPDATE 2: With cut edges we can have an arbitrary number of
## corner-like vertices.
assert len( where( asarray( abs( L ).sum(1) ).ravel() < 1e-5 )[0] ) == ( 2 if rows == 1 or cols == 1 else 4 ) or len( cut_edges ) > 0
## UPDATE 2: But their columns should still have non-zero values.
assert ( asarray( abs( L ).sum(0) ).ravel() > 1e-5 ).all()
## It should be symmetric.
#assert len( ( L - L.T ).nonzero()[0] ) == 0
return L
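## A usage note (this mirrors how solve_grid_linear() uses it further below): since the
## reflected operator is not symmetric, it is squared into a symmetric bilaplacian before solving:
##   L = gen_grid_laplacian2_with_boundary_reflection( rows, cols, cut_edges )
##   B = L.T * L   ## symmetric, positive semi-definite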
gen_symmetric_grid_laplacian = gen_symmetric_grid_laplacian2
def gen_constraint_system( rows, cols, dx_constraints = [], dy_constraints = [], value_constraints = [] ):
'''
Given dimensions of a 'rows' by 'cols' 2D grid,
a sequence 'dx_constraints' of tuples ( i, j, dx ) specifying the value of grid[ i+1,j ] - grid[ i,j ],
a sequence 'dy_constraints' of tuples ( i, j, dy ) specifying the value of grid[ i,j+1 ] - grid[ i,j ],
a sequence 'value_constraints' of tuples ( i, j, val ) specifying the value of grid[ i,j ],
returns two items, a matrix 'C' and an array 'Crhs', representing the constraints specified
by 'dx_constraints' and 'dy_constraints' and 'value_constraints' in matrix form:
C*dof = Crhs
NOTE: The constrained values dx, dy, and val can be K-dimensional vectors instead of scalars,
in which case Crhs will have shape #constraints-by-K.
tested
'''
from scipy import sparse
## Indices for dx constraints should be valid.
assert False not in [ i >= 0 and i < rows-1 for i, j, dx in dx_constraints ]
assert False not in [ j >= 0 and j < cols for i, j, dx in dx_constraints ]
## Indices for dy constraints should be valid.
assert False not in [ i >= 0 and i < rows for i, j, dy in dy_constraints ]
assert False not in [ j >= 0 and j < cols-1 for i, j, dy in dy_constraints ]
## Indices for value constraints should be valid.
assert False not in [ i >= 0 and i < rows for i, j, val in value_constraints ]
assert False not in [ j >= 0 and j < cols for i, j, val in value_constraints ]
## There shouldn't be duplicate constraints.
assert len( set( [ (i,j) for i, j, dx in dx_constraints ] ) ) == len( dx_constraints )
assert len( set( [ (i,j) for i, j, dy in dy_constraints ] ) ) == len( dy_constraints )
assert len( set( [ (i,j) for i, j, val in value_constraints ] ) ) == len( value_constraints )
def ij2ind( i,j ):
assert i >= 0 and i < rows and j >= 0 and j < cols
return i*cols + j
### Constrain gradients (system).
## The following three lists define one linear equation per index i: \sum_j zs[ indices[i][j] ] * vals[i][j] = rhs[i]
indices = []
vals = []
rhs = []
## Do this by constraining directional differences according to the given dx and dy in 'dx_constraints' and 'dy_constraints'.
for i,j,dx in dx_constraints:
indices.append( [ ij2ind( i+1, j ), ij2ind( i, j ) ] )
vals.append( [ 1., -1. ] )
rhs.append( dx )
for i,j,dy in dy_constraints:
indices.append( [ ij2ind( i, j+1 ), ij2ind( i, j ) ] )
vals.append( [ 1., -1. ] )
rhs.append( dy )
for i,j,val in value_constraints:
indices.append( [ ij2ind( i, j ) ] )
vals.append( [ 1. ] )
rhs.append( val )
assert len( indices ) == len( vals )
assert len( indices ) == len( rhs )
## Build the constraints system.
Crows = [ [i] * len( js ) for i, js in enumerate( indices ) ]
#debugger()
C = sparse.coo_matrix( ( concatenate( vals ), ( concatenate( Crows ), concatenate( indices ) ) ), shape = ( len( Crows ), rows*cols ) )
Crhs = asarray( rhs )
return C, Crhs
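## A small worked example (a sketch, not from the original code): on a 2-by-2 grid,
##   C, Crhs = gen_constraint_system( 2, 2, dx_constraints = [ ( 0, 0, 1.5 ) ] )
## produces a single row of C with +1 at flat index 2 (grid[1,0]) and -1 at flat index 0 (grid[0,0]),
## and Crhs = [ 1.5 ], i.e. it encodes the equation grid[1,0] - grid[0,0] = 1.5.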
def gen_constraint_rhs( dx_constraints = [], dy_constraints = [], value_constraints = [] ):
'''
Given a sequence 'dx_constraints' of values dx,
a sequence 'dy_constraints' of values dy,
a sequence 'value_constraints' of values val,
returns an array 'Crhs', representing the right-hand-side for the system generated by
gen_constraint_system() with same-named 'dx_constraints' and 'dy_constraints' and
'value_constraints' in matrix form:
C*dof = Crhs
NOTE: The constrained values dx, dy, and val can be K-dimensional vectors instead of scalars,
in which case Crhs will have shape #constraints-by-K.
'''
Crhs = asarray( list( dx_constraints ) + list( dy_constraints ) + list( value_constraints ) )
return Crhs
def system_and_rhs_with_value_constraints1( system, rhs, value_constraints, cols ):
'''
Given a symmetric sparse matrix 'system' and right-hand side 'rhs' over a flattened
grid with 'cols' columns, and 'value_constraints' as a sequence of ( i, j, value ) tuples,
returns a new ( system, rhs ) with the constrained degrees of freedom turned into
identity rows/columns (keeping the matrix symmetric) and the right-hand side updated accordingly.
tested
'''
from tictoc import tic, toc
## Copy rhs, because we modify it in-place.
rhs = array( rhs )
tic( 'value constraint walking:' )
## Now incorporate value_constraints by setting some identity rows and changing the right-hand-side.
## Set constraint rows to identity rows.
## We also zero the columns to keep the matrix symmetric.
## We also have to update the right-hand-side when we zero the columns.
## There shouldn't be duplicate constraints.
assert len( set( [ (i,j) for i, j, val in value_constraints ] ) ) == len( value_constraints )
## A dict has the right algorithmic access properties for what we want,
## which is a lot of checking whether a given degree-of-freedom is inside.
value_constraints_dict = dict([ ( i*cols+j, val ) for i,j,val in value_constraints ])
## Turn 'system' into a coo_matrix so it has .data, .row, .col properties.
system = system.tocoo()
## Set corresponding entries of the right-hand-side vector to the constraint value.
for index, value in value_constraints_dict.items(): rhs[ index ] = value
## Update the right-hand-side elements wherever a fixed degree-of-freedom
## is involved in a row (the columns of constrained degrees-of-freedom).
for val, row, col in zip( system.data, system.row, system.col ):
if row in value_constraints_dict and col not in value_constraints_dict:
rhs[ col ] -= val * value_constraints_dict[ row ]
system_vij = [ (val,row,col) for val,row,col in zip( system.data, system.row, system.col ) if row not in value_constraints_dict and col not in value_constraints_dict ]
## Then add a 1 along the diagonal.
system_vij.extend( [ ( 1., index, index ) for index in value_constraints_dict.keys() ] )
toc()
tic( 'transpose vij:' )
## Transpose system_vij for passing to matrix constructors.
## UPDATE: Also convert each row to an array so that cvxopt doesn't complain.
system_vij = [ asarray( a ) for a in zip( *system_vij ) ]
del system
del value_constraints_dict
toc()
import scipy.sparse
system = scipy.sparse.coo_matrix( ( system_vij[0], system_vij[1:] ) )
return system, rhs
def system_and_rhs_with_value_constraints2( system, rhs, value_constraints, cols ):
'''
The same as 'system_and_rhs_with_value_constraints1()', but implemented with sparse
matrix products instead of an explicit loop over entries, which is much faster.
tested
'''
from scipy import sparse
## Now incorporate value_constraints by setting some identity rows and changing the right-hand-side.
## Set constraint rows to identity rows.
## We also zero the columns to keep the matrix symmetric.
## NOTE: We have to update the right-hand-side when we zero the columns.
## There shouldn't be duplicate constraints.
assert len( set( [ (i,j) for i, j, val in value_constraints ] ) ) == len( value_constraints )
value_constraint_values = asarray([ val for i,j,val in value_constraints ])
value_constraint_indices = asarray([ (i,j) for i,j,val in value_constraints ])
value_constraint_indices = value_constraint_indices[:,0] * cols + value_constraint_indices[:,1]
## Update the right-hand-side elements wherever a fixed degree-of-freedom
## is involved in a row (the columns of constrained degrees-of-freedom).
'''
for index, val in zip( value_constraint_values, value_constraint_indices ):
rhs -= system.getrow( index ) * val
'''
#R = sparse.csr_matrix( ( 1, system.shape[0] ) )
#for index, val in zip( value_constraint_values, value_constraint_indices ): R[ 0, index ] = val
R = sparse.coo_matrix(
## values
( value_constraint_values,
## row indices are zero
( zeros( value_constraint_indices.shape, value_constraint_indices.dtype ),
## column indices
value_constraint_indices ) ),
## shape is 1 x N
shape = ( 1, system.shape[0] )
)
R = R.tocsr()
rhs = rhs - R * system
rhs = asarray( rhs ).ravel()
## Set corresponding entries of the right-hand-side vector to the constraint value.
#for index, value in value_constraints_dict.iteritems(): rhs[ index ] = value
rhs[ value_constraint_indices ] = value_constraint_values
## Zero the constrained rows and columns, and set the diagonal to 1.
## Zero the rows and columns.
diag = ones( rhs.shape[0] )
diag[ value_constraint_indices ] = 0.
#D = sparse.identity( system.shape[0] )
#D.setdiag( diag )
D = sparse.coo_matrix( ( diag, ( arange( system.shape[0] ), arange( system.shape[0] ) ) ) ).tocsr()
system = D * system * D
## Set the constraints' diagonals to 1.
#diag = zeros( rhs.shape[0] )
#diag[ value_constraint_indices ] = 1.
#D.setdiag( diag )
#D.setdiag( 1. - diag )
D = sparse.coo_matrix( ( ones( value_constraint_indices.shape[0] ), ( value_constraint_indices, value_constraint_indices ) ), shape = system.shape )
system = system + D
return system, rhs
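## A sketch of the intended effect (restating the comments above): a call such as
##   system, rhs = system_and_rhs_with_value_constraints2( system, rhs, [ ( i, j, v ) ], cols )
## turns row and column i*cols + j into an identity row/column, moves that column's
## contribution (times v) into the right-hand side, and sets rhs[ i*cols + j ] = v,
## so the modified system remains symmetric.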
def system_and_rhs_with_value_constraints2_multiple_rhs( system, rhs, value_constraints, cols ):
'''
The same as 'system_and_rhs_with_value_constraints2()', except 'rhs' and the constrained
values may have multiple columns (shape #dofs-by-K and #constraints-by-K, respectively).
used (successfully by a function that is tested)
'''
from scipy import sparse
## Now incorporate value_constraints by setting some identity rows and changing the right-hand-side.
## Set constraint rows to identity rows.
## We also zero the columns to keep the matrix symmetric.
## NOTE: We have to update the right-hand-side when we zero the columns.
## There shouldn't be duplicate constraints.
assert len( set( [ (i,j) for i, j, val in value_constraints ] ) ) == len( value_constraints )
value_constraint_values = asarray([ val for i,j,val in value_constraints ])
value_constraint_indices = asarray([ (i,j) for i,j,val in value_constraints ])
value_constraint_indices = value_constraint_indices[:,0] * cols + value_constraint_indices[:,1]
## Update the right-hand-side elements wherever a fixed degree-of-freedom
## is involved in a row (the columns of constrained degrees-of-freedom).
'''
for index, val in zip( value_constraint_values, value_constraint_indices ):
rhs -= system.getrow( index ) * val
'''
#R = sparse.csr_matrix( ( 1, system.shape[0] ) )
#for index, val in zip( value_constraint_values, value_constraint_indices ): R[ 0, index ] = val
R = sparse.coo_matrix(
## values
( value_constraint_values.T.ravel(),
## row indices are zeros for the zero-th element in each value, ones for the one-th element in each value, and so on.
( repeat( arange( value_constraint_values.shape[1], dtype = value_constraint_indices.dtype ), value_constraint_indices.shape[0] ),
## column indices
tile( value_constraint_indices, value_constraint_values.shape[1] ) ) ),
## shape is value_constraint_values.shape[1] x N
shape = ( value_constraint_values.shape[1], system.shape[0] )
)
R = R.tocsr()
rhs = rhs - ( R * system ).T
rhs = asarray( rhs )
## Set corresponding entries of the right-hand-side vector to the constraint value.
#for index, value in value_constraints_dict.iteritems(): rhs[ index ] = value
rhs[ value_constraint_indices, : ] = value_constraint_values
## Zero the constrained rows and columns, and set the diagonal to 1.
## Zero the rows and columns.
diag = ones( rhs.shape[0] )
diag[ value_constraint_indices ] = 0.
#D = sparse.identity( system.shape[0] )
#D.setdiag( diag )
D = sparse.coo_matrix( ( diag, ( arange( system.shape[0] ), arange( system.shape[0] ) ) ) ).tocsr()
system = D * system * D
## Set the constraints' diagonals to 1.
#diag = zeros( rhs.shape[0] )
#diag[ value_constraint_indices ] = 1.
#D.setdiag( diag )
#D.setdiag( 1. - diag )
D = sparse.coo_matrix( ( ones( value_constraint_indices.shape[0] ), ( value_constraint_indices, value_constraint_indices ) ), shape = system.shape )
system = system + D
return system, rhs
## Use 'system_and_rhs_with_value_constraints2', since it is much faster.
system_and_rhs_with_value_constraints = system_and_rhs_with_value_constraints2
def system_with_value_constraints3( system, value_constraints, cols ):
'''
Like system_and_rhs_with_value_constraints2_multiple_rhs()
except value_constraints are only (i,j) tuples and the resulting system
does not stay symmetric. This is so that when the right-hand side changes, we don't
have to change the system matrix.
'''
from scipy import sparse
## Now incorporate value_constraints by setting some identity rows and changing the right-hand-side.
## Set constraint rows to identity rows.
## There shouldn't be duplicate constraints.
assert len( set( value_constraints ) ) == len( value_constraints )
value_constraint_indices = asarray( value_constraints )
value_constraint_indices = value_constraint_indices[:,0] * cols + value_constraint_indices[:,1]
## Zero the rows.
diag = ones( system.shape[0] )
diag[ value_constraint_indices ] = 0.
#D = sparse.identity( system.shape[0] )
#D.setdiag( diag )
D = sparse.coo_matrix( ( diag, ( arange( system.shape[0] ), arange( system.shape[0] ) ) ) ).tocsr()
system = D * system
## Set the constraints' diagonals to 1.
D = sparse.coo_matrix( ( ones( value_constraint_indices.shape[0] ), ( value_constraint_indices, value_constraint_indices ) ), shape = system.shape )
system = system + D
return system
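## Design note (an inference from the docstring above): because the constrained rows become
## plain identity rows, changing the constraint values only requires rebuilding the right-hand
## side with rhs_with_value_constraints3() below; the modified 'system' matrix can be reused.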
def rhs_with_value_constraints3( rhs, value_constraint_indices, value_constraint_values, cols ):
'''
Like system_and_rhs_with_value_constraints2_multiple_rhs()
except the value constraints are passed as
a sequence of (i,j) 'value_constraint_indices' and
a sequence of scalar or k-dimensional values 'value_constraint_values'.
'''
## Incorporate the value constraints by overwriting the corresponding entries of the
## right-hand side; system_with_value_constraints3() already turned those rows into identity rows.
## There shouldn't be duplicate constraints.
## Convert value_constraint_indices entries to tuples so they can be added to a set().
assert len( set([ (i,j) for i,j in value_constraint_indices ]) ) == len( value_constraint_indices )
## We should have the same number of values as indices.
assert len( value_constraint_indices ) == len( value_constraint_values )
value_constraint_indices = asarray( value_constraint_indices )
value_constraint_indices = value_constraint_indices[:,0] * cols + value_constraint_indices[:,1]
rhs = array( rhs )
rhs[ value_constraint_indices ] = value_constraint_values
return rhs
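## A usage sketch for solve_grid_linear(), defined immediately below (hedged: the weights,
## the solver, and the shape of the returned solution are determined inside the function):
##   ## pin one corner and ask for an x-slope of .01 along row 10 of a rows-by-cols grid
##   dx = [ ( 10, j, .01 ) for j in range( cols ) ]
##   z = solve_grid_linear( rows, cols, dx_constraints = dx,
##                          value_constraints = [ ( 0, 0, 0. ) ],
##                          bilaplacian = True )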
def solve_grid_linear( rows, cols, dx_constraints = None, dy_constraints = None, value_constraints = None, bilaplacian = False, iterative = False, cut_edges = None, w_lsq = None ):
'''
Given dimensions of a 'rows' by 'cols' 2D grid,
an optional sequence 'dx_constraints' of tuples ( i, j, dx ) specifying the value of grid[ i+1,j ] - grid[ i,j ],
an optional sequence 'dy_constraints' of tuples ( i, j, dy ) specifying the value of grid[ i,j+1 ] - grid[ i,j ],
an optional sequence 'value_constraints' of tuples ( i, j, value ) specifying the value of grid[ i,j ],
returns the solution to the Laplace equation on the 'rows' by 'cols' grid with derivatives
given by 'dx_constraints' and 'dy_constraints' and values given by 'value_constraints'.
If 'bilaplacian' is true, this will solve bilaplacian f = 0, and boundaries will be reflected.
If 'iterative' is true, an iterative solver will be used instead of a direct method.
Optional parameter 'cut_edges' specifies edges that should be disconnected
in the laplacian/bilaplacian system. The parameter must be in the format
suitable for passing to 'gen_symmetric_grid_laplacian().'
The returned solution is almost guaranteed to be discontinuous across the edges.
The convenience function 'cut_edges_from_mask()' can be used to create
the 'cut_edges' parameter from a mask.
To allow derivative discontinuity but still require connectedness across edges,
places those edges into 'dx_constraints' and 'dy_constraints' with
constraint value 0. The convenience function
'zero_dx_dy_constraints_from_cut_edges()' can be used for this.
Optional parameter 'w_lsq' determines the weight for the
least-squares constraints (dx/dy_constraints).
tested
(see also test_cut_edges())
'''
if dx_constraints is None: dx_constraints = []
if dy_constraints is None: dy_constraints = []
if value_constraints is None: value_constraints = []
## Smoothness term.
w_smooth = 1.
#w_smooth = 10.
## Constraints (normals and z0 = 0) term.
if w_lsq is None:
w_gradients = 1e5
## Reconstruct with more weight on the smoothing:
#w_gradients = 1e-1
else:
assert float( w_lsq ) == w_lsq
w_gradients = float( w_lsq )
print('solve_grid_linear( rows = %s, cols = %s, |constraints| = %s, iterative = %s, bilaplacian = %s, |cut_edges| = %s, w_lsq = %s )' % (
rows, cols,
len( dx_constraints ) + len( dy_constraints ) + len( value_constraints ),
iterative,
bilaplacian,
len( cut_edges ) if cut_edges is not None else None,
w_gradients
))
## The grid can get large; splitting the building of L and C into separate functions
## means that the temporaries go away as soon as we build the matrices.
from tictoc import tic, toc
tic( 'build energy:' )
#L = gen_symmetric_grid_laplacian1( rows, cols, cut_edges )
#L = gen_symmetric_grid_laplacian2( rows, cols, cut_edges )
## NOTE: I would always call gen_grid_laplacian2_with_boundary_reflection(),
## but the matrices are too large to solve with LU decomposition,
## our unsymmetric solver. This isn't a problem with the bilaplacian,
## because we multiply the matrix by its transpose, which
## always results in a symmetric matrix.
#debugger()
## Bi-Laplacian instead of Laplacian.
if bilaplacian:
L = gen_grid_laplacian2_with_boundary_reflection( rows, cols, cut_edges )
L = L.T*L
else:
L = gen_symmetric_grid_laplacian( rows, cols, cut_edges )
toc()
tic( 'add constraints:' )
tic( 'dx dy constraints:' )
if len( dx_constraints ) + len( dy_constraints ) > 0:
C, Crhs = gen_constraint_system( rows, cols, dx_constraints, dy_constraints, [] if ( 0 < len(value_constraints) ) else [ (0,0,0.) ] )
#debugger()
## The system to solve:
## w_smooth * ( L * grid.ravel() ) + w_gradients * ( C.T * C * grid.ravel() ) = w_gradients * C.T * Crhs
system = ( w_smooth * L + ( w_gradients * C.T ) * C )
rhs = ( ( w_gradients * C.T ) * Crhs )
del C
del Crhs
else:
system = w_smooth * L
rhs = zeros( L.shape[0] )
del L
toc()
tic( 'value constraints (fast):' )
if len( value_constraints ) > 0:
#system_grads = system
system, rhs = system_and_rhs_with_value_constraints2( system, rhs, value_constraints, cols )
toc()
'''
tic( 'value constraints (slow):' )
s2, r2 = system_and_rhs_with_value_constraints1( system, rhs, value_constraints, cols )
toc()
## We pass these tests.
assert ( system - s2 ).nnz == 0
assert len( where( rhs - r2 )[0] ) == 0
'''
toc()
if iterative:
## Iterative solution.
import scipy.sparse
tic( 'convert matrix for solving' )
#system = scipy.sparse.coo_matrix( ( system_vij[0], system_vij[1:] ) ).tocsr()
#del system_vij
system = system.tocsr()
toc()
import scipy.sparse.linalg
tic( 'solve system of equations cg:' )
result, info = scipy.sparse.linalg.cg( system, rhs )
toc()
assert 0 == info
result = array( result ).ravel()
else:
import cvxopt, cvxopt.cholmod, cvxopt.umfpack
tic( 'convert matrix for solving:' )
system = system.tocoo()
## Q: Why does cvxopt compiled on Mac OS X 10.6 instead of 10.5 complain about invalid array types?
## A: Because it insists on having the row and col parameters with type 'int', not 'int32'.
#system = cvxopt.spmatrix( system.data, system.row, system.col )
#system_scipy = system
system = cvxopt.spmatrix( system.data, asarray( system.row, dtype = int ), asarray( system.col, dtype = int ) )
#system = cvxopt.spmatrix( system_vij[0], system_vij[1] , system_vij[2] )
#system = cvxopt.spmatrix( *zip( *system_vij ) )
#del system_vij
toc()
rhs = cvxopt.matrix( rhs )
tic( 'solve system of equations direct:' )
## Works
cvxopt.cholmod.linsolve( system, rhs )
## Eats all memory then dies:
#cvxopt.umfpack.linsolve( system, rhs )
toc()
result = array( rhs ).ravel()
## Eats all memory then dies (based on SuperLU):
#import scipy.sparse as sparse