import json
import os

import numpy as np
import torch

from mesh.io import save_obj, to_mesh
from mesh.smpl2mesh import SMPL2Mesh
from skeleton import SkeletonAMASS, convert2humanml
from skeleton2smpl.skeleton2smpl import Skeleton2Obj
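
# This script fits SMPL bodies to predicted 3D joint trajectories with a
# SMPLify-style optimization (initialized from the observed frames), converts
# the fitted bodies to meshes, and exports them as per-frame .obj files.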

def save_mesh(vertices, faces, npy_file):
    def npy_path_to_obj_path(npy_path: str) -> str:
        return os.path.join(os.path.dirname(npy_path), f"{os.path.basename(npy_path)}_obj")

    results_dir = npy_path_to_obj_path(npy_file)
    os.makedirs(results_dir, exist_ok=True)
    for frame_i in range(vertices.shape[-1]):
        # The first 30 frames are the observation; the rest are the prediction.
        sub_dir = "obs_obj" if frame_i < 30 else "pred_obj"
        frame_dir = os.path.join(results_dir, sub_dir)
        os.makedirs(frame_dir, exist_ok=True)
        file_path = os.path.join(frame_dir, f"frame{frame_i:03d}.obj")
        mesh = to_mesh(vertices[..., frame_i], faces)
        save_obj(mesh, file_path)
    print(f"Saved obj files to [{results_dir}]")

def main():
    # The SMPLify iteration count dominates runtime; it can be decreased or
    # increased depending on the desired output quality (or how quickly we need
    # the result).
    num_smplify_iters = 20
    device = "cuda"

    # Load the SMPL parameters of the observed frames and keep the last one.
    json_file_path = "./smpl_params.json"
    with open(json_file_path, "r") as json_file:
        loaded_data = json.load(json_file)
    person_idx = 0
    smpl_dict_last_obs = loaded_data[-1]
    smpl_dict_last_obs = {k: torch.from_numpy(np.array(v)).float().to(device) for k, v in smpl_dict_last_obs.items()}

    # Load the predicted 3D joint positions.
    # pred_motions = torch.from_numpy(np.load("./predictions/joints3d.npy", allow_pickle=True)).to(device)
    pred_motions = torch.from_numpy(np.load("src_joints2smpl_demo/joints2smpl/joints3d.npy", allow_pickle=True)).to(device)
    # Remove the batch dimension (and, if needed, add a zero hip joint).
    pred_motions = pred_motions.squeeze(0)
    # pred_motions = torch.cat([torch.zeros(*pred_motions.shape[:2], 1, 3).to(device), pred_motions], dim=-2)
    # Optionally select just some of the motions.
    # TODO: use the previous code with the limb-length variance error to choose
    # the sample, or pick the most diverse one.
    # pred_motions = pred_motions[:1]
    pred_motions = pred_motions.view(-1, 22, 3)

    # Map the AMASS skeleton to the HumanML joint convention.
    skeleton = SkeletonAMASS
    pred_motions = convert2humanml(pred_motions, skeleton.LANDMARKS, skeleton.TO_HUMANML_NAMES)
    print(pred_motions)
    print(pred_motions.shape)

    # Initialize SMPLify from the last observed frame: shared betas, the last body
    # pose with axes remapped (x, y, z) -> (x, z, -y), and the translation as the
    # camera term, each expanded to all predicted frames.
    init_params = {}
    init_params["betas"] = smpl_dict_last_obs["betas"][person_idx].unsqueeze(0).expand(pred_motions.shape[0], -1)
    init_params["pose"] = smpl_dict_last_obs["body_pose"][person_idx].view(-1, 3)
    init_params["pose"] = torch.stack([init_params["pose"][..., 0], init_params["pose"][..., 2], -init_params["pose"][..., 1]], dim=-1)
    assert init_params["pose"].shape[0] == 24, "the body pose should have 24 joints; it is the output of NLF"
    init_params["pose"] = init_params["pose"].unsqueeze(0).expand(pred_motions.shape[0], -1, -1).view(pred_motions.shape[0], -1).to(device)
    init_params["cam"] = smpl_dict_last_obs["transl"][person_idx].unsqueeze(0).unsqueeze(-2).expand(pred_motions.shape[0], -1, -1).to(device)

    # Run the SMPLify optimization.
    skeleton2obj = Skeleton2Obj(
        device=device,
        num_smplify_iters=num_smplify_iters,
        smpl_model_dir="./models/body_models/",  # path to SMPL body models
        gmm_model_dir="./models/joint2smpl_models/",  # path to GMM prior model
    )
    # rot_motions, smpl_dict = skeleton2obj.convert_motion_2smpl(pred_motions, hmp=True, init_params=init_params, fix_betas=True)
    thetas, rot_motions = skeleton2obj.convert_motion_2smpl(pred_motions, hmp=True, init_params=init_params, fix_betas=True)

    # Convert the fitted SMPL parameters to meshes and save per-frame .obj files.
    smpl2mesh = SMPL2Mesh(device)
    vertices, faces = smpl2mesh.convert_smpl_to_mesh(rot_motions, pred_motions)
    pred_files = ['./hanyu']
    vertices = vertices.reshape(*vertices.shape[:2], len(pred_files), -1)
    for v, npy_file in zip(np.moveaxis(vertices, 2, 0), pred_files):
        save_mesh(v, faces, npy_file)
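
# process_motion mirrors main() but takes the observation JSON, the predicted
# joints .npy, and the target device as arguments, so the pipeline can be driven
# from the Gradio UI or another script.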
def process_motion(smpl_params_path, pred_motions_path, device):
    # The SMPLify iteration count dominates runtime; it can be decreased or
    # increased depending on the desired output quality (or how quickly we need
    # the result).
    num_smplify_iters = 100

    # Load the SMPL parameters of the observed frames and keep the last one.
    with open(smpl_params_path, "r") as json_file:
        loaded_data = json.load(json_file)
    person_idx = 0
    smpl_dict_last_obs = loaded_data[-1]
    smpl_dict_last_obs = {k: torch.from_numpy(np.array(v)).float().to(device) for k, v in smpl_dict_last_obs.items()}

    # Load the predicted 3D joint positions.
    pred_motions = torch.from_numpy(np.load(pred_motions_path, allow_pickle=True)).to(device)
    # Remove the batch dimension (and, if needed, add a zero hip joint).
    pred_motions = pred_motions.squeeze(0)
    # pred_motions = torch.cat([torch.zeros(*pred_motions.shape[:2], 1, 3).to(device), pred_motions], dim=-2)
    # Select just the first motion.
    # TODO: use the previous code with the limb-length variance error to choose
    # the sample, or pick the most diverse one.
    pred_motions = pred_motions[:1]
    pred_motions = pred_motions.view(-1, 22, 3)

    # Map the AMASS skeleton to the HumanML joint convention.
    skeleton = SkeletonAMASS
    pred_motions = convert2humanml(pred_motions, skeleton.LANDMARKS, skeleton.TO_HUMANML_NAMES)
    # pred_motions = torch.cat([get_humanml_motion(npy_file, skeleton=skeleton, remove_global_translation=True) for npy_file in pred_files], dim=0)
    print(pred_motions)
    print(pred_motions.shape)
    # Optional override: load a fixed prediction file instead of the motions
    # passed via pred_motions_path.
    # pred_files = ['pred_closest_GT.npy']
    # pred_motions = torch.from_numpy(np.load(pred_files[0], allow_pickle=True)).to(device)

    # Initialization from the last observed frame. These values are superseded by
    # the obs_data.npz initialization below, which rebuilds init_params from scratch.
    # init_params = {}
    # init_params["betas"] = smpl_dict_last_obs["betas"][person_idx].unsqueeze(0).expand(pred_motions.shape[0], -1)
    # init_params["pose"] = smpl_dict_last_obs["body_pose"][person_idx].view(-1, 3)
    # init_params["pose"] = torch.stack([init_params["pose"][..., 0], init_params["pose"][..., 2], -init_params["pose"][..., 1]], dim=-1)
    # assert init_params["pose"].shape[0] == 24, "the body pose should have 24 joints; it is the output of NLF"
    # init_params["pose"] = init_params["pose"].unsqueeze(0).expand(pred_motions.shape[0], -1, -1).view(pred_motions.shape[0], -1).to(device)
    # init_params["cam"] = smpl_dict_last_obs["transl"][person_idx].unsqueeze(0).unsqueeze(-2).expand(pred_motions.shape[0], -1, -1).to(device)

    # Initialize SMPLify from the fitted observation sequence stored in obs_data.npz.
    loaded_data = np.load("obs_data.npz", allow_pickle=True)
    rot_motions_obs = loaded_data["rot_motions_obs"]
    smpl_dict_obs = loaded_data["smpl_dict_obs"].item()
    smpl_dict_obs = {k: torch.from_numpy(v).to(device) for k, v in smpl_dict_obs.items()}
    init_params = {}
    init_params["betas"] = smpl_dict_obs["betas"][-1].unsqueeze(0).expand(pred_motions.shape[0], -1).to(device)
    init_params["pose"] = smpl_dict_obs["pose"][-1].unsqueeze(0).expand(pred_motions.shape[0], -1, -1).view(pred_motions.shape[0], -1).to(device)
    init_params["cam"] = smpl_dict_obs["cam"][-1].unsqueeze(0).expand(pred_motions.shape[0], -1, -1).to(device)

    # Run the SMPLify optimization (gradients must be enabled) and export meshes.
    with torch.set_grad_enabled(True):
        skeleton2obj = Skeleton2Obj(
            device=device,
            num_smplify_iters=num_smplify_iters,
            smpl_model_dir="./models/body_models/",  # path to SMPL body models
            gmm_model_dir="./models/joint2smpl_models/",  # path to GMM prior model
        )
        # rot_motions, smpl_dict = skeleton2obj.convert_motion_2smpl(pred_motions, hmp=True, init_params=init_params, fix_betas=True)
        thetas, rot_motions = skeleton2obj.convert_motion_2smpl(pred_motions, hmp=True, init_params=init_params, fix_betas=True)

        # Convert the fitted SMPL parameters to meshes and save per-frame .obj files.
        smpl2mesh = SMPL2Mesh(device)
        vertices, faces = smpl2mesh.convert_smpl_to_mesh(rot_motions, pred_motions)
        pred_files = ['./hanyu']
        vertices = vertices.reshape(*vertices.shape[:2], len(pred_files), -1)
        for v, npy_file in zip(np.moveaxis(vertices, 2, 0), pred_files):
            save_mesh(v, faces, npy_file)

if __name__ == "__main__":
    process_motion("./smpl_params.json", "./predictions/joints3d.npy", "cuda")
    # process_motion("./smpl_params.json", "./pred_closest_GT_joints3d.npy", "cuda")