train_autoencoder.py
"""Train a point-cloud autoencoder with a Chamfer-distance reconstruction loss."""
from shapemaker import *  # assumed to provide PCAutoEncoder64, report_progress, device

import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from pytorch3d.loss import chamfer_distance
NUM_TRAINING_SESSIONS = 5000
num_points = 1000
BATCH_SIZE = 40
autoencoder = PCAutoEncoder64(3, num_points)
autoencoder.to(device)

# Point clouds stored as an array of shape (num_clouds, num_points, 3).
dataset = np.load("dataset1k.npy")

optimizer = optim.Adam(autoencoder.parameters(), lr=0.001)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=40)
for epoch in range(NUM_TRAINING_SESSIONS + 1):
    # Sample a random mini-batch of point clouds without replacement.
    indices = np.random.choice(len(dataset), BATCH_SIZE, replace=False)
    pointcloud = dataset[indices]  # [:, :num_points]
    points = torch.tensor(pointcloud, dtype=torch.float32, device=device)

    autoencoder.zero_grad()
    # The network expects channels-first input: (batch, 3, num_points).
    inputs = torch.transpose(points, 1, 2)
    reconstructed_points, global_feat = autoencoder(inputs)

    # chamfer_distance returns (loss, loss_normals); only the point term is used.
    dist = chamfer_distance(points, torch.transpose(reconstructed_points, 1, 2))
    # dist = torch.mean(torch.abs(points - torch.transpose(reconstructed_points, 1, 2)), 2)
    train_loss = dist[0]

    # Calculate the gradients using backpropagation.
    train_loss.backward()
    # Update the weights and biases.
    optimizer.step()

    report_progress(epoch, NUM_TRAINING_SESSIONS, train_loss.detach().cpu().numpy())
    scheduler.step(train_loss)
torch.save(autoencoder.state_dict(), 'autoencoder64.pth')
print("Finished")