# example.py — extracted from a web-rendered source view; page chrome and the
# line-number gutter were scraping residue and have been removed.
r"""
Test the system with an example IMU measurement sequence.
"""
import torch
from net import PoseNet
from config import paths, joint_set
from utils import normalize_and_concat
import os
import articulate as art
sample_idx = 2
def Our():
    """Run our PoseNet on one DIP-IMU test sequence and visualize the result.

    Loads the trained weights, runs offline inference on the sequence selected
    by the module-level ``sample_idx``, and opens the SMPL motion viewer with
    the predicted pose, translation, and foot-contact probabilities.
    """
    isMatrix = False
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    net = PoseNet(isMatrix=isMatrix, device=device).to(device)
    # map_location is required so a GPU-saved checkpoint still loads on the
    # CPU fallback selected above.
    net.load_state_dict(torch.load("weights.tar", map_location=device))
    net.eval()
    data = torch.load(os.path.join(paths.dipimu_dir, 'test.pt'))
    acc = data['acc'][sample_idx]
    ori = data['ori'][sample_idx]
    x = normalize_and_concat(acc, ori, isMatrix=isMatrix).to(device)
    x = x.unsqueeze(1)  # add the batch dimension expected by the network
    pose, tran, contact_probability = net.forward_offline(x)  # offline inference
    # Online alternative (frame-by-frame):
    # pose, tran = [torch.stack(_) for _ in zip(*[net.forward_online(f) for f in x])]
    art.ParametricModel(paths.male_smpl_file, device=device).view_motion([pose], [tran], contact=contact_probability)
# clone Transpose git
# def tranPose():
# # Transose
# isMatrix = True
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# net = TransPoseNet().to(device)
# checkpoint = torch.load("data/weights.pt")
# net.load_state_dict(checkpoint)
# net.eval()
# data = torch.load(os.path.join(paths.dipimu_dir, 'test.pt'))
# acc = data['acc'][sample_idx]
# ori = data['ori'][sample_idx]
# x = normalize_and_concat(acc, ori,isMatrix=isMatrix).to(device)
# x = x.unsqueeze(1)
# # pose, tran = net.forward_offline(x) # offline
# # pose = pose.cuda()
# pose, tran = [torch.stack(_) for _ in zip(*[net.forward_online(f) for f in x])] # online
# tran = torch.zeros((len(pose), 3)).to(device)
# art.ParametricModel(paths.male_smpl_file, device=device).view_motion([pose], [tran])
def DIP():
    """Visualize the DIP-IMU ground-truth pose for ``sample_idx`` via SMPL.

    Converts the stored axis-angle pose to rotation matrices, maps the reduced
    global joint set back to a full 24-joint local pose (identity for ignored
    joints, root taken from the last IMU orientation), and opens the viewer
    with zero translation.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data = torch.load(os.path.join(paths.dipimu_dir, 'test.pt'))
    pose = data['pose'][sample_idx]
    # Last IMU orientation is used as the root rotation — TODO confirm against
    # the dataset-preprocessing code.
    root_rotation = data['ori'][sample_idx][:, -1]
    pose = art.math.axis_angle_to_rotation_matrix(pose).view(-1, 24, 3, 3).to(device)
    m = art.ParametricModel(paths.male_smpl_file, device=device)
    global_to_local_pose = m.inverse_kinematics_R
    local_to_global_pose = m.forward_kinematics_R
    global_pose = local_to_global_pose(pose)
    glb_reduced_pose = global_pose[:, joint_set.reduced]

    def _reduced_glb_to_full_local_mat(root_rotation, glb_reduced_pose):
        # Start from identity for all 24 joints, overwrite the reduced set,
        # convert global -> local, then pin ignored joints and the root.
        global_full_pose = torch.eye(3, device=glb_reduced_pose.device).repeat(glb_reduced_pose.shape[0], 24, 1, 1)
        global_full_pose[:, joint_set.reduced] = glb_reduced_pose
        pose = global_to_local_pose(global_full_pose).view(-1, 24, 3, 3)
        pose[:, joint_set.ignored] = torch.eye(3, device=pose.device)
        pose[:, 0] = root_rotation.view(-1, 3, 3)
        return pose

    pose = _reduced_glb_to_full_local_mat(root_rotation, glb_reduced_pose)
    tran = torch.zeros((len(pose), 3), device=device)  # no translation ground truth is used here
    # Reuse the model built above instead of constructing a second identical one.
    m.view_motion([pose], [tran])
if __name__ == "__main__":
    # Guard so importing this module does not trigger inference/visualization.
    # tranPose()
    Our()