Support TEXTure
- .gitignore +2 -1
- README.md +1 -1
- apps/IFGeo.py +4 -3
- apps/Normal.py +5 -4
- apps/avatarizer.py +172 -19
- apps/benchmark.py +18 -13
- apps/infer.py +64 -73
- apps/multi_render.py +4 -2
- configs/econ.yaml +1 -1
- docs/tricks.md +1 -1
- lib/common/BNI.py +8 -5
- lib/common/BNI_utils.py +42 -38
- lib/common/blender_utils.py +0 -383
- lib/common/cloth_extraction.py +9 -10
- lib/common/config.py +2 -1
- lib/common/imutils.py +24 -25
- lib/common/libmesh/inside_mesh.py +3 -4
- lib/common/libmesh/setup.py +1 -1
- lib/common/libmesh/triangle_hash.cpp +103 -103
- lib/common/libmesh/triangle_hash.pyx +4 -2
- lib/common/libvoxelize/voxelize.c +57 -57
- lib/common/libvoxelize/voxelize.pyx +2 -1
- lib/common/local_affine.py +9 -10
- lib/common/render.py +56 -63
- lib/common/render_utils.py +22 -7
- lib/common/seg3d_lossless.py +11 -21
- lib/common/seg3d_utils.py +7 -10
- lib/common/train_util.py +15 -41
- lib/common/voxelize.py +60 -74
- lib/dataset/EvalDataset.py +48 -54
- lib/dataset/Evaluator.py +13 -14
- lib/dataset/NormalDataset.py +22 -31
- lib/dataset/NormalModule.py +3 -3
- lib/dataset/PointFeat.py +2 -1
- lib/dataset/TestDataset.py +19 -23
- lib/dataset/body_model.py +33 -40
- lib/dataset/mesh_util.py +105 -75
- lib/net/BasePIFuNet.py +1 -1
- lib/net/Discriminator.py +7 -2
- lib/net/FBNet.py +22 -29
- lib/net/GANLoss.py +2 -1
- lib/net/IFGeoNet.py +7 -7
- lib/net/IFGeoNet_nobody.py +7 -7
- lib/net/NormalNet.py +10 -10
- lib/net/geometry.py +29 -37
- lib/net/net_util.py +3 -2
- lib/net/voxelize.py +3 -3
- lib/pixielib/models/FLAME.py +3 -2
- lib/pixielib/models/SMPLX.py +453 -468
- lib/pixielib/models/encoders.py +1 -1
.gitignore
CHANGED

@@ -16,4 +16,5 @@ build
 dist
 *egg-info
 *.so
-run.sh
+run.sh
+*.log
README.md
CHANGED

@@ -25,6 +25,7 @@
 <a href="https://pytorchlightning.ai/"><img alt="Lightning" src="https://img.shields.io/badge/-Lightning-792ee5?logo=pytorchlightning&logoColor=white"></a>
 <a href="https://cupy.dev/"><img alt="cupy" src="https://img.shields.io/badge/-Cupy-46C02B?logo=numpy&logoColor=white"></a>
 <a href="https://twitter.com/yuliangxiu"><img alt='Twitter' src="https://img.shields.io/twitter/follow/yuliangxiu?label=%40yuliangxiu"></a>
+<a href="https://discord.gg/Vqa7KBGRyk"><img alt="discord invitation link" src="https://dcbadge.vercel.app/api/server/Vqa7KBGRyk?style=flat"></a>
 <br></br>
 <a href='https://colab.research.google.com/drive/1YRgwoRCZIrSB2e7auEWFyG10Xzjbrbno?usp=sharing'><img src='https://colab.research.google.com/assets/colab-badge.svg' alt='Google Colab'></a>
 <a href='https://github.com/YuliangXiu/ECON/blob/master/docs/installation-docker.md'><img src='https://img.shields.io/badge/Docker-9cf.svg?logo=Docker' alt='Docker'></a>
@@ -35,7 +36,6 @@
 </a>
 <a href='https://xiuyuliang.cn/econ/'>
 <img src='https://img.shields.io/badge/ECON-Page-orange?style=for-the-badge&logo=Google%20chrome&logoColor=white&labelColor=D35400' alt='Project Page'></a>
-<a href="https://discord.gg/Vqa7KBGRyk"><img src="https://img.shields.io/discord/940240966844035082?color=7289DA&labelColor=4a64bd&logo=discord&logoColor=white&style=for-the-badge"></a>
 <a href="https://youtu.be/j5hw4tsWpoY"><img alt="youtube views" title="Subscribe to my YouTube channel" src="https://img.shields.io/youtube/views/j5hw4tsWpoY?logo=youtube&labelColor=ce4630&style=for-the-badge"/></a>
 </p>
 </p>
apps/IFGeo.py
CHANGED

@@ -14,11 +14,12 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-from lib.common.seg3d_lossless import Seg3dLossless
-from lib.common.train_util import *
-import torch
 import numpy as np
 import pytorch_lightning as pl
+import torch
+
+from lib.common.seg3d_lossless import Seg3dLossless
+from lib.common.train_util import *

 torch.backends.cudnn.benchmark = True

apps/Normal.py
CHANGED

@@ -1,9 +1,10 @@
-from lib.net import NormalNet
-from lib.common.train_util import batch_mean
-import torch
 import numpy as np
-from skimage.transform import resize
 import pytorch_lightning as pl
+import torch
+from skimage.transform import resize
+
+from lib.common.train_util import batch_mean
+from lib.net import NormalNet


 class Normal(pl.LightningModule):
apps/avatarizer.py
CHANGED

@@ -1,17 +1,25 @@
-import numpy as np
-import trimesh
-import torch
 import argparse
+import os
 import os.path as osp
-
+
+import numpy as np
+import torch
+import trimesh
 from pytorch3d.ops import SubdivideMeshes
 from pytorch3d.structures import Meshes
-
-from lib.smplx.lbs import general_lbs
-from lib.dataset.mesh_util import keep_largest, poisson
 from scipy.spatial import cKDTree
-
+
+import lib.smplx as smplx
 from lib.common.local_affine import register
+from lib.dataset.mesh_util import (
+    SMPLX,
+    export_obj,
+    keep_largest,
+    o3d_ransac,
+    poisson,
+    remesh_laplacian,
+)
+from lib.smplx.lbs import general_lbs

 # loading cfg file
 parser = argparse.ArgumentParser()
@@ -22,12 +30,18 @@ args = parser.parse_args()
 smplx_container = SMPLX()
 device = torch.device(f"cuda:{args.gpu}")

+# loading SMPL-X and econ objs inferred with ECON
 prefix = f"./results/econ/obj/{args.name}"
 smpl_path = f"{prefix}_smpl_00.npy"
-econ_path = f"{prefix}_0_full.obj"
-
 smplx_param = np.load(smpl_path, allow_pickle=True).item()
+
+# export econ obj with pre-computed normals
+econ_path = f"{prefix}_0_full.obj"
 econ_obj = trimesh.load(econ_path)
+assert (econ_obj.vertex_normals.shape[1] == 3)
+econ_obj.export(f"{prefix}_econ_raw.ply")
+
+# align econ with SMPL-X
 econ_obj.vertices *= np.array([1.0, -1.0, -1.0])
 econ_obj.vertices /= smplx_param["scale"].cpu().numpy()
 econ_obj.vertices -= smplx_param["transl"].cpu().numpy()
@@ -49,6 +63,7 @@ smpl_model = smplx.create(

 smpl_out_lst = []

+# obtain the pose params of T-pose, DA-pose, and the original pose
 for pose_type in ["t-pose", "da-pose", "pose"]:
     smpl_out_lst.append(
         smpl_model(
@@ -67,6 +82,12 @@ for pose_type in ["t-pose", "da-pose", "pose"]:
         )
     )

+# -------------------------- align econ and SMPL-X in DA-pose space ------------------------- #
+# 1. find the vertex-correspondence between SMPL-X and econ
+# 2. ECON + SMPL-X: posed space --> T-pose space --> DA-pose space
+# 3. ECON (w/o hands & over-streched faces) + SMPL-X (w/ hands & registered inpainting parts)
+# ------------------------------------------------------------------------------------------- #
+
 smpl_verts = smpl_out_lst[2].vertices.detach()[0]
 smpl_tree = cKDTree(smpl_verts.cpu().numpy())
 dist, idx = smpl_tree.query(econ_obj.vertices, k=5)
@@ -143,14 +164,25 @@ if not osp.exists(f"{prefix}_econ_da.obj") or not osp.exists(f"{prefix}_smpl_da.
     smpl_da_body.remove_unreferenced_vertices()

     smpl_hand = smpl_da.copy()
-    smpl_hand.update_faces(
+    smpl_hand.update_faces(
+        smplx_container.smplx_mano_vertex_mask.numpy()[smpl_hand.faces].all(axis=1)
+    )
     smpl_hand.remove_unreferenced_vertices()
     econ_da = sum([smpl_hand, smpl_da_body, econ_da_body])
-    econ_da = poisson(econ_da, f"{prefix}_econ_da.obj", depth=10,
+    econ_da = poisson(econ_da, f"{prefix}_econ_da.obj", depth=10, face_count=50000)
+    econ_da = remesh_laplacian(econ_da, f"{prefix}_econ_da.obj")
 else:
     econ_da = trimesh.load(f"{prefix}_econ_da.obj")
     smpl_da = trimesh.load(f"{prefix}_smpl_da.obj", maintain_orders=True, process=False)

+# ---------------------- SMPL-X compatible ECON ---------------------- #
+# 1. Find the new vertex-correspondence between NEW ECON and SMPL-X
+# 2. Build the new J_regressor, lbs_weights, posedirs
+# 3. canonicalize the NEW ECON
+# ------------------------------------------------------------------- #
+
+print("Start building the SMPL-X compatible ECON model...")
+
 smpl_tree = cKDTree(smpl_da.vertices)
 dist, idx = smpl_tree.query(econ_da.vertices, k=5)
 knn_weights = np.exp(-dist**2)
@@ -167,19 +199,137 @@
 econ_J_regressor /= econ_J_regressor.sum(dim=1, keepdims=True).clip(min=1e-10)
 econ_lbs_weights /= econ_lbs_weights.sum(dim=1, keepdims=True)

-# re-compute da-pose rot_mat for ECON
 rot_mat_da = smpl_out_lst[1].vertex_transformation.detach()[0][idx[:, 0]]
 econ_da_verts = torch.tensor(econ_da.vertices).float()
-econ_cano_verts = torch.inverse(rot_mat_da) @ torch.cat(
-    [econ_da_verts, torch.ones_like(econ_da_verts)[..., :1]], dim=1
-).unsqueeze(-1)
+econ_cano_verts = torch.inverse(rot_mat_da) @ torch.cat([
+    econ_da_verts, torch.ones_like(econ_da_verts)[..., :1]
+],
+                                                        dim=1).unsqueeze(-1)
 econ_cano_verts = econ_cano_verts[:, :3, 0].double()

 # ----------------------------------------------------
-# use
+# use original pose to animate ECON reconstruction
 # ----------------------------------------------------

 new_pose = smpl_out_lst[2].full_pose
+# new_pose[:, :3] = 0.
+
+posed_econ_verts, _ = general_lbs(
+    pose=new_pose,
+    v_template=econ_cano_verts.unsqueeze(0),
+    posedirs=econ_posedirs,
+    J_regressor=econ_J_regressor,
+    parents=smpl_model.parents,
+    lbs_weights=econ_lbs_weights
+)
+
+aligned_econ_verts = posed_econ_verts[0].detach().cpu().numpy()
+aligned_econ_verts += smplx_param["transl"].cpu().numpy()
+aligned_econ_verts *= smplx_param["scale"].cpu().numpy() * np.array([1.0, -1.0, -1.0])
+econ_pose = trimesh.Trimesh(aligned_econ_verts, econ_da.faces)
+assert (econ_pose.vertex_normals.shape[1] == 3)
+econ_pose.export(f"{prefix}_econ_pose.ply")
+
+# -------------------------------------------------------------------------
+# Align posed ECON with original ECON, for pixel-aligned texture extraction
+# -------------------------------------------------------------------------
+
+print("Start ICP registration between posed & original ECON...")
+import open3d as o3d
+
+source = o3d.io.read_point_cloud(f"{prefix}_econ_pose.ply")
+target = o3d.io.read_point_cloud(f"{prefix}_econ_raw.ply")
+trans_init = o3d_ransac(source, target)
+icp_criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
+    relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=100
+)
+
+reg_p2l = o3d.pipelines.registration.registration_icp(
+    source,
+    target,
+    0.1,
+    trans_init,
+    o3d.pipelines.registration.TransformationEstimationPointToPlane(),
+    criteria=icp_criteria
+)
+econ_pose.apply_transform(reg_p2l.transformation)
+
+cache_path = f"{prefix.replace('obj','cache')}"
+os.makedirs(cache_path, exist_ok=True)
+
+# -----------------------------------------------------------------
+# create UV texture (.obj .mtl .png) from posed ECON reconstruction
+# -----------------------------------------------------------------
+
+print("Start Color mapping...")
+from PIL import Image
+from torchvision import transforms
+
+from lib.common.render import query_color
+from lib.common.render_utils import Pytorch3dRasterizer
+
+if not osp.exists(f"{prefix}_econ_icp_rgb.ply"):
+    masked_image = f"./results/econ/png/{args.name}_cloth.png"
+    tensor_image = transforms.ToTensor()(Image.open(masked_image))[:, :, :512]
+    final_colors = query_color(
+        torch.tensor(econ_pose.vertices).float(),
+        torch.tensor(econ_pose.faces).long(),
+        ((tensor_image - 0.5) * 2.0).unsqueeze(0).to(device),
+        device=device,
+        paint_normal=False,
+    )
+    final_colors[final_colors == tensor_image[:, 0, 0] * 255.0] = 0.0
+    final_colors = final_colors.detach().cpu().numpy()
+    econ_pose.visual.vertex_colors = final_colors
+    econ_pose.export(f"{prefix}_econ_icp_rgb.ply")
+else:
+    mesh = trimesh.load(f"{prefix}_econ_icp_rgb.ply")
+    final_colors = mesh.visual.vertex_colors[:, :3]
+
+print("Start UV texture generation...")
+
+# Generate UV coords
+v_np = econ_pose.vertices
+f_np = econ_pose.faces
+
+vt_cache = osp.join(cache_path, "vt.pt")
+ft_cache = osp.join(cache_path, "ft.pt")
+
+if osp.exists(vt_cache) and osp.exists(ft_cache):
+    vt = torch.load(vt_cache).to(device)
+    ft = torch.load(ft_cache).to(device)
+else:
+    import xatlas
+    atlas = xatlas.Atlas()
+    atlas.add_mesh(v_np, f_np)
+    chart_options = xatlas.ChartOptions()
+    chart_options.max_iterations = 4
+    atlas.generate(chart_options=chart_options)
+    vmapping, ft_np, vt_np = atlas[0]
+
+    vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)
+    ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)
+    torch.save(vt.cpu(), vt_cache)
+    torch.save(ft.cpu(), ft_cache)
+
+# UV texture rendering
+uv_rasterizer = Pytorch3dRasterizer(image_size=512, device=device)
+texture_npy = uv_rasterizer.get_texture(
+    torch.cat([(vt - 0.5) * 2.0, torch.ones_like(vt[:, :1])], dim=1),
+    ft,
+    torch.tensor(v_np).unsqueeze(0).float(),
+    torch.tensor(f_np).unsqueeze(0).long(),
+    torch.tensor(final_colors).unsqueeze(0).float() / 255.0,
+)
+
+Image.fromarray((texture_npy * 255.0).astype(np.uint8)).save(f"{cache_path}/texture.png")
+
+# UV mask for TEXTure (https://readpaper.com/paper/4720151447010820097)
+texture_npy[texture_npy.sum(axis=2) == 0.0] = 1.0
+Image.fromarray((texture_npy * 255.0).astype(np.uint8)).save(f"{cache_path}/mask.png")
+
+# generate da-pose vertices
+new_pose = smpl_out_lst[1].full_pose
 new_pose[:, :3] = 0.

 posed_econ_verts, _ = general_lbs(
@@ -191,5 +341,8 @@ posed_econ_verts, _ = general_lbs(
     lbs_weights=econ_lbs_weights
 )

-
-
+# export mtl file
+mtl_string = f"newmtl mat0 \nKa 1.000000 1.000000 1.000000 \nKd 1.000000 1.000000 1.000000 \nKs 0.000000 0.000000 0.000000 \nTr 1.000000 \nillum 1 \nNs 0.000000\nmap_Kd texture.png"
+with open(f"{cache_path}/material.mtl", 'w') as file:
+    file.write(mtl_string)
+export_obj(posed_econ_verts[0].detach().cpu().numpy(), f_np, vt, ft, f"{cache_path}/mesh.obj")
apps/benchmark.py
CHANGED

@@ -14,28 +14,29 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-import warnings
 import logging
+import warnings

 warnings.filterwarnings("ignore")
 logging.getLogger("lightning").setLevel(logging.ERROR)
 logging.getLogger("trimesh").setLevel(logging.ERROR)

-import torch
 import argparse
 import os

+import torch
 from termcolor import colored
 from tqdm.auto import tqdm
-
+
 from apps.IFGeo import IFGeo
-from
+from apps.Normal import Normal
 from lib.common.BNI import BNI
 from lib.common.BNI_utils import save_normal_tensor
+from lib.common.config import cfg
+from lib.common.voxelize import VoxelGrid
 from lib.dataset.EvalDataset import EvalDataset
 from lib.dataset.Evaluator import Evaluator
 from lib.dataset.mesh_util import *
-from lib.common.voxelize import VoxelGrid

 torch.backends.cudnn.benchmark = True
 speed_analysis = False
@@ -62,8 +63,14 @@ if __name__ == "__main__":
     device = torch.device("cuda:0")

     cfg_test_list = [
-        "dataset.rotation_num",
-
+        "dataset.rotation_num",
+        3,
+        "bni.use_smpl",
+        ["hand"],
+        "bni.use_ifnet",
+        args.ifnet,
+        "bni.cut_intersection",
+        True,
     ]

     # # if w/ RenderPeople+CAPE
@@ -176,12 +183,10 @@

         # mesh completion via IF-net
         in_tensor.update(
-            dataset.depth_to_voxel(
-                {
-                    "depth_F": BNI_object.F_depth.unsqueeze(0).to(device),
-                    "depth_B": BNI_object.B_depth.unsqueeze(0).to(device)
-                }
-            )
+            dataset.depth_to_voxel({
+                "depth_F": BNI_object.F_depth.unsqueeze(0).to(device), "depth_B":
+                    BNI_object.B_depth.unsqueeze(0).to(device)
+            })
         )

         occupancies = VoxelGrid.from_mesh(side_mesh, cfg.vol_res, loc=[
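Note: the flat cfg_test_list above works because the yacs-style config consumes alternating key/value pairs. A minimal sketch of that mechanism, assuming lib.common.config.cfg is a standard yacs CfgNode (the defaults below are illustrative, not ECON's):

from yacs.config import CfgNode as CN

cfg = CN()
cfg.dataset = CN()
cfg.dataset.rotation_num = 36    # illustrative default
cfg.bni = CN()
cfg.bni.use_smpl = []
cfg.bni.use_ifnet = True
cfg.bni.cut_intersection = False

# alternating key/value pairs, exactly the shape of cfg_test_list
cfg.merge_from_list([
    "dataset.rotation_num", 3,
    "bni.use_smpl", ["hand"],
    "bni.use_ifnet", False,
    "bni.cut_intersection", True,
])
assert cfg.dataset.rotation_num == 3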
apps/infer.py
CHANGED

@@ -14,35 +14,37 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-import warnings
 import logging
+import warnings

 warnings.filterwarnings("ignore")
 logging.getLogger("lightning").setLevel(logging.ERROR)
 logging.getLogger("trimesh").setLevel(logging.ERROR)

-import torch, torchvision
-import trimesh
-import numpy as np
 import argparse
 import os

+import numpy as np
+import torch
+import torchvision
+import trimesh
+from pytorch3d.ops import SubdivideMeshes
 from termcolor import colored
 from tqdm.auto import tqdm
-
+
 from apps.IFGeo import IFGeo
-from
-from lib.common.config import cfg
-from lib.common.render import query_color
-from lib.common.train_util import init_loss, Format
-from lib.common.imutils import blend_rgb_norm
+from apps.Normal import Normal
 from lib.common.BNI import BNI
 from lib.common.BNI_utils import save_normal_tensor
-from lib.
+from lib.common.config import cfg
+from lib.common.imutils import blend_rgb_norm
 from lib.common.local_affine import register
-from lib.
-from lib.
+from lib.common.render import query_color
+from lib.common.train_util import Format, init_loss
 from lib.common.voxelize import VoxelGrid
+from lib.dataset.mesh_util import *
+from lib.dataset.TestDataset import TestDataset
+from lib.net.geometry import rot6d_to_rotmat, rotation_matrix_to_angle_axis

 torch.backends.cudnn.benchmark = True

@@ -146,9 +148,8 @@
         os.makedirs(osp.join(args.out_dir, cfg.name, "obj"), exist_ok=True)

         in_tensor = {
-            "smpl_faces": data["smpl_faces"],
-            "image": data["img_icon"].to(device),
-            "mask": data["img_mask"].to(device)
+            "smpl_faces": data["smpl_faces"], "image": data["img_icon"].to(device), "mask":
+                data["img_mask"].to(device)
         }

         # The optimizer and variables
@@ -157,9 +158,11 @@
         optimed_betas = data["betas"].requires_grad_(True)
         optimed_orient = data["global_orient"].requires_grad_(True)

-        optimizer_smpl = torch.optim.Adam(
-            [optimed_pose, optimed_trans, optimed_betas, optimed_orient], lr=1e-2, amsgrad=True
-        )
+        optimizer_smpl = torch.optim.Adam([
+            optimed_pose, optimed_trans, optimed_betas, optimed_orient
+        ],
+                                          lr=1e-2,
+                                          amsgrad=True)
         scheduler_smpl = torch.optim.lr_scheduler.ReduceLROnPlateau(
             optimizer_smpl,
             mode="min",
@@ -234,9 +237,9 @@
             )

             smpl_verts = (smpl_verts + optimed_trans) * data["scale"]
-            smpl_joints = (smpl_joints + optimed_trans) * data["scale"] * torch.tensor(
-                [1.0, 1.0, -1.0]
-            ).to(device)
+            smpl_joints = (smpl_joints + optimed_trans) * data["scale"] * torch.tensor([
+                1.0, 1.0, -1.0
+            ]).to(device)

             # landmark errors
             smpl_joints_3d = (
@@ -280,13 +283,11 @@

             # BUG: PyTorch3D silhouette renderer generates dilated mask
             bg_value = in_tensor["T_normal_F"][0, 0, 0, 0]
-            smpl_arr_fake = torch.cat(
-                [
-                    in_tensor["T_normal_F"][:, 0].ne(bg_value).float(),
-                    in_tensor["T_normal_B"][:, 0].ne(bg_value).float()
-                ],
-                dim=-1
-            )
+            smpl_arr_fake = torch.cat([
+                in_tensor["T_normal_F"][:, 0].ne(bg_value).float(),
+                in_tensor["T_normal_B"][:, 0].ne(bg_value).float()
+            ],
+                                      dim=-1)

             body_overlap = (gt_arr * smpl_arr_fake.gt(0.0)
                            ).sum(dim=[1, 2]) / smpl_arr_fake.gt(0.0).sum(dim=[1, 2])
@@ -322,22 +323,18 @@
             # save intermediate results
             if (i == args.loop_smpl - 1) and (not args.novis):

-                per_loop_lst.extend(
-                    [
-                        in_tensor["image"],
-                        in_tensor["T_normal_F"],
-                        in_tensor["normal_F"],
-                        diff_S[:, :, :512].unsqueeze(1).repeat(1, 3, 1, 1),
-                    ]
-                )
-                per_loop_lst.extend(
-                    [
-                        in_tensor["image"],
-                        in_tensor["T_normal_B"],
-                        in_tensor["normal_B"],
-                        diff_S[:, :, 512:].unsqueeze(1).repeat(1, 3, 1, 1),
-                    ]
-                )
+                per_loop_lst.extend([
+                    in_tensor["image"],
+                    in_tensor["T_normal_F"],
+                    in_tensor["normal_F"],
+                    diff_S[:, :, :512].unsqueeze(1).repeat(1, 3, 1, 1),
+                ])
+                per_loop_lst.extend([
+                    in_tensor["image"],
+                    in_tensor["T_normal_B"],
+                    in_tensor["normal_B"],
+                    diff_S[:, :, 512:].unsqueeze(1).repeat(1, 3, 1, 1),
+                ])
                 per_data_lst.append(
                     get_optim_grid_image(per_loop_lst, None, nrow=N_body * 2, type="smpl")
                 )
@@ -357,13 +354,11 @@
         if not args.novis:
             img_crop_path = osp.join(args.out_dir, cfg.name, "png", f"{data['name']}_crop.png")
             torchvision.utils.save_image(
-                torch.cat(
-                    [
-                        data["img_crop"][:, :3], (in_tensor['normal_F'].detach().cpu() + 1.0) * 0.5,
-                        (in_tensor['normal_B'].detach().cpu() + 1.0) * 0.5
-                    ],
-                    dim=3
-                ), img_crop_path
+                torch.cat([
+                    data["img_crop"][:, :3], (in_tensor['normal_F'].detach().cpu() + 1.0) * 0.5,
+                    (in_tensor['normal_B'].detach().cpu() + 1.0) * 0.5
+                ],
+                          dim=3), img_crop_path
             )

             rgb_norm_F = blend_rgb_norm(in_tensor["normal_F"], data)
@@ -392,27 +387,25 @@
                 smpl_obj.export(smpl_obj_path)
                 smpl_info = {
                     "betas":
                         optimed_betas[idx].detach().cpu().unsqueeze(0),
                     "body_pose":
                         rotation_matrix_to_angle_axis(optimed_pose_mat[idx].detach()
                                                      ).cpu().unsqueeze(0),
                     "global_orient":
                         rotation_matrix_to_angle_axis(optimed_orient_mat[idx].detach()
                                                      ).cpu().unsqueeze(0),
                     "transl":
                         optimed_trans[idx].detach().cpu(),
                     "expression":
                         data["exp"][idx].cpu().unsqueeze(0),
                     "jaw_pose":
                         rotation_matrix_to_angle_axis(data["jaw_pose"][idx]).cpu().unsqueeze(0),
                     "left_hand_pose":
-                        rotation_matrix_to_angle_axis(data["left_hand_pose"][idx]
-                                                     ).cpu().unsqueeze(0),
+                        rotation_matrix_to_angle_axis(data["left_hand_pose"][idx]).cpu().unsqueeze(0),
                     "right_hand_pose":
-                        rotation_matrix_to_angle_axis(data["right_hand_pose"][idx]
-                                                     ).cpu().unsqueeze(0),
+                        rotation_matrix_to_angle_axis(data["right_hand_pose"][idx]).cpu().unsqueeze(0),
                     "scale":
                         data["scale"][idx].cpu(),
                 }
                 np.save(
                     smpl_obj_path.replace(".obj", ".npy"),
@@ -434,8 +427,8 @@

         per_data_lst = []

-        batch_smpl_verts = in_tensor["smpl_verts"].detach(
-        ) * torch.tensor([1.0, -1.0, 1.0], device=device)
+        batch_smpl_verts = in_tensor["smpl_verts"].detach() * torch.tensor([1.0, -1.0, 1.0],
+                                                                           device=device)
         batch_smpl_faces = in_tensor["smpl_faces"].detach()[:, :, [0, 2, 1]]

         in_tensor["depth_F"], in_tensor["depth_B"] = dataset.render_depth(
@@ -491,12 +484,10 @@

             # mesh completion via IF-net
             in_tensor.update(
-                dataset.depth_to_voxel(
-                    {
-                        "depth_F": BNI_object.F_depth.unsqueeze(0),
-                        "depth_B": BNI_object.B_depth.unsqueeze(0)
-                    }
-                )
+                dataset.depth_to_voxel({
+                    "depth_F": BNI_object.F_depth.unsqueeze(0), "depth_B":
+                        BNI_object.B_depth.unsqueeze(0)
+                })
             )

             occupancies = VoxelGrid.from_mesh(side_mesh, cfg.vol_res, loc=[
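Note: the smpl_info export above converts the optimized rotation matrices back to the axis-angle vectors that SMPL-X stores. A small sketch of that round trip, using pytorch3d.transforms as an assumed stand-in for lib.net.geometry.rotation_matrix_to_angle_axis:

import torch
from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle

aa = torch.tensor([[0.3, -0.2, 0.1]])    # one axis-angle rotation, shape (1, 3)
R = axis_angle_to_matrix(aa)             # rotation matrix, shape (1, 3, 3)
aa_back = matrix_to_axis_angle(R)        # back to axis-angle, shape (1, 3)
assert torch.allclose(aa, aa_back, atol=1e-6)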
apps/multi_render.py
CHANGED

@@ -1,7 +1,9 @@
-from lib.common.render import Render
-import torch
 import argparse

+import torch
+
+from lib.common.render import Render
+
 root = "./results/econ/vid"

 # loading cfg file
configs/econ.yaml
CHANGED

@@ -28,7 +28,7 @@ bni:
   lambda1: 1e-4
   boundary_consist: 1e-6
   poisson_depth: 10
-  use_smpl: ["hand"
+  use_smpl: ["hand"]
   use_ifnet: False
   use_poisson: True
   hand_thres: 8e-2
docs/tricks.md
CHANGED

@@ -2,7 +2,7 @@

 ### If the reconstructed geometry is not satisfying, play with the adjustable parameters in _config/econ.yaml_

-- `use_smpl: ["hand"
+- `use_smpl: ["hand"]`
   - [ ]: don't use either hands or face parts from SMPL-X
   - ["hand"]: only use the **visible** hands from SMPL-X
   - ["hand", "face"]: use both **visible** hands and face from SMPL-X
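Note: combined with the configs/econ.yaml fix above, a bni block enabling both parts would look like this (values other than use_smpl are copied from the hunk shown earlier; the "face" entry is one of the documented options, not a new default):

bni:
  poisson_depth: 10
  use_smpl: ["hand", "face"]   # visible hands and face from SMPL-X
  use_ifnet: False
  use_poisson: True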
lib/common/BNI.py
CHANGED

@@ -1,10 +1,12 @@
-from lib.common.BNI_utils import (
-    verts_inverse_transform, depth_inverse_transform, double_side_bilateral_normal_integration
-)
-
 import torch
 import trimesh

+from lib.common.BNI_utils import (
+    depth_inverse_transform,
+    double_side_bilateral_normal_integration,
+    verts_inverse_transform,
+)
+

 class BNI:
     def __init__(self, dir_path, name, BNI_dict, cfg, device):
@@ -84,8 +86,9 @@ class BNI:

 if __name__ == "__main__":

-    import numpy as np
     import os.path as osp
+
+    import numpy as np
     from tqdm import tqdm

     root = "/home/yxiu/Code/ECON/results/examples/BNI"
lib/common/BNI_utils.py
CHANGED

@@ -1,13 +1,23 @@
-import torch
-import trimesh
-import cv2, os
-from PIL import Image
+import os
 import os.path as osp
+
 import cupy as cp
+import cv2
 import numpy as np
-from cupyx.scipy.sparse import coo_matrix, csr_matrix, diags, hstack, spdiags, vstack
+import torch
+import trimesh
+from cupyx.scipy.sparse import (
+    coo_matrix,
+    csr_matrix,
+    diags,
+    hstack,
+    spdiags,
+    vstack,
+)
 from cupyx.scipy.sparse.linalg import cg
+from PIL import Image
 from tqdm.auto import tqdm
+
 from lib.dataset.mesh_util import clean_floats

@@ -68,13 +78,11 @@ def mean_value_cordinates(inner_pts, contour_pts):
     body_edges_c = np.roll(body_edges_a, shift=-1, axis=1)
     body_edges_b = np.sqrt(((contour_pts - np.roll(contour_pts, shift=-1, axis=0))**2).sum(axis=1))

-    body_edges = np.concatenate(
-        [
-            body_edges_a[..., None], body_edges_c[..., None],
-            np.repeat(body_edges_b[None, :, None], axis=0, repeats=len(inner_pts))
-        ],
-        axis=-1
-    )
+    body_edges = np.concatenate([
+        body_edges_a[..., None], body_edges_c[..., None],
+        np.repeat(body_edges_b[None, :, None], axis=0, repeats=len(inner_pts))
+    ],
+                                axis=-1)

     body_cos = (body_edges[:, :, 0]**2 + body_edges[:, :, 1]**2 -
                 body_edges[:, :, 2]**2) / (2 * body_edges[:, :, 0] * body_edges[:, :, 1])
@@ -167,9 +175,9 @@ def verts_transform(t, depth_scale):
     t_copy = t.clone()
     t_copy *= depth_scale * 0.5
     t_copy += depth_scale * 0.5
-    t_copy = t_copy[:, [1, 0, 2]] * torch.Tensor([2.0, 2.0, -2.0]) + torch.Tensor(
-        [0.0, 0.0, depth_scale]
-    )
+    t_copy = t_copy[:, [1, 0, 2]] * torch.Tensor([2.0, 2.0, -2.0]) + torch.Tensor([
+        0.0, 0.0, depth_scale
+    ])

     return t_copy

@@ -342,15 +350,13 @@ def construct_facets_from(mask):
     facet_bottom_left_mask = move_bottom(facet_top_left_mask)
     facet_bottom_right_mask = move_bottom_right(facet_top_left_mask)

-    return cp.hstack(
-        (
-            4 * cp.ones((cp.sum(facet_top_left_mask).item(), 1)),
-            idx[facet_top_left_mask][:, None],
-            idx[facet_bottom_left_mask][:, None],
-            idx[facet_bottom_right_mask][:, None],
-            idx[facet_top_right_mask][:, None],
-        )
-    ).astype(int)
+    return cp.hstack((
+        4 * cp.ones((cp.sum(facet_top_left_mask).item(), 1)),
+        idx[facet_top_left_mask][:, None],
+        idx[facet_bottom_left_mask][:, None],
+        idx[facet_bottom_right_mask][:, None],
+        idx[facet_top_right_mask][:, None],
+    )).astype(int)


 def map_depth_map_to_point_clouds(depth_map, mask, K=None, step_size=1):
@@ -614,7 +620,7 @@ def double_side_bilateral_normal_integration(

         energy_list.append(energy)
         relative_energy = cp.abs(energy - energy_old) / energy_old
-
+
         # print(f"step {i + 1}/{max_iter} energy: {energy:.3e}"
         #       f" relative energy: {relative_energy:.3e}")

@@ -640,13 +646,11 @@
     B_verts = verts_inverse_transform(torch.as_tensor(vertices_back).float(), 256.0)

     F_B_verts = torch.cat((F_verts, B_verts), dim=0)
-    F_B_faces = torch.cat(
-        (
-            torch.as_tensor(faces_front_).long(),
-            torch.as_tensor(faces_back_).long() + faces_front_.max() + 1
-        ),
-        dim=0
-    )
+    F_B_faces = torch.cat((
+        torch.as_tensor(faces_front_).long(),
+        torch.as_tensor(faces_back_).long() + faces_front_.max() + 1
+    ),
+                          dim=0)

     front_surf = trimesh.Trimesh(F_verts, faces_front_)
     back_surf = trimesh.Trimesh(B_verts, faces_back_)
@@ -690,12 +694,12 @@
     back_mesh = clean_floats(trimesh.Trimesh(vertices_back, faces_back))

     result = {
-        "F_verts": torch.as_tensor(front_mesh.vertices).float(),
-        "F_faces": torch.as_tensor(front_mesh.faces).long(),
-        "B_verts": torch.as_tensor(back_mesh.vertices).float(),
-        "B_faces": torch.as_tensor(back_mesh.faces).long(),
-        "F_depth": torch.as_tensor(depth_map_front_est).float(),
-        "B_depth": torch.as_tensor(depth_map_back_est).float()
+        "F_verts": torch.as_tensor(front_mesh.vertices).float(), "F_faces": torch.as_tensor(
+            front_mesh.faces
+        ).long(), "B_verts": torch.as_tensor(back_mesh.vertices).float(), "B_faces":
+            torch.as_tensor(back_mesh.faces).long(), "F_depth":
+                torch.as_tensor(depth_map_front_est).float(), "B_depth":
+                    torch.as_tensor(depth_map_back_est).float()
     }

     return result
lib/common/blender_utils.py
DELETED

@@ -1,383 +0,0 @@
-import bpy
-import sys, os
-from math import radians
-import mathutils
-import bmesh
-
-print(sys.exec_prefix)
-from tqdm import tqdm
-import numpy as np
-
-##################################################
-# Globals
-##################################################
-
-views = 120
-
-render = 'eevee'
-cycles_gpu = False
-
-quality_preview = False
-samples_preview = 16
-samples_final = 256
-
-resolution_x = 512
-resolution_y = 512
-
-shadows = False
-
-# diffuse_color = (57.0/255.0, 108.0/255.0, 189.0/255.0, 1.0)
-# diffuse_color = (18/255., 139/255., 142/255.,1) #correct
-# diffuse_color = (251/255., 60/255., 60/255.,1) #wrong
-
-smooth = False
-
-wireframe = False
-line_thickness = 0.1
-quads = False
-
-object_transparent = False
-mouth_transparent = False
-
-compositor_background_image = False
-compositor_image_scale = 1.0
-compositor_alpha = 0.7
-
-##################################################
-# Helper functions
-##################################################
-
-
-def blender_print(*args, **kwargs):
-    print(*args, **kwargs, file=sys.stderr)
-
-
-def using_app():
-    ''' Returns if script is running through Blender application (GUI or background processing)'''
-    return (not sys.argv[0].endswith('.py'))
-
-
-def setup_diffuse_transparent_material(target, color, object_transparent, backface_transparent):
-    ''' Sets up diffuse/transparent material with backface culling in cycles'''
-
-    mat = target.active_material
-    if mat is None:
-        # Create material
-        mat = bpy.data.materials.new(name='Material')
-        target.data.materials.append(mat)
-
-    mat.use_nodes = True
-    nodes = mat.node_tree.nodes
-    for node in nodes:
-        nodes.remove(node)
-
-    node_geometry = nodes.new('ShaderNodeNewGeometry')
-
-    node_diffuse = nodes.new('ShaderNodeBsdfDiffuse')
-    node_diffuse.inputs[0].default_value = color
-
-    node_transparent = nodes.new('ShaderNodeBsdfTransparent')
-    node_transparent.inputs[0].default_value = (1.0, 1.0, 1.0, 1.0)
-
-    node_emission = nodes.new('ShaderNodeEmission')
-    node_emission.inputs[0].default_value = (0.0, 0.0, 0.0, 1.0)
-
-    node_mix = nodes.new(type='ShaderNodeMixShader')
-    if object_transparent:
-        node_mix.inputs[0].default_value = 1.0
-    else:
-        node_mix.inputs[0].default_value = 0.0
-
-    node_mix_mouth = nodes.new(type='ShaderNodeMixShader')
-    if object_transparent or backface_transparent:
-        node_mix_mouth.inputs[0].default_value = 1.0
-    else:
-        node_mix_mouth.inputs[0].default_value = 0.0
-
-    node_mix_backface = nodes.new(type='ShaderNodeMixShader')
-
-    node_output = nodes.new(type='ShaderNodeOutputMaterial')
-
-    links = mat.node_tree.links
-
-    links.new(node_geometry.outputs[6], node_mix_backface.inputs[0])
-
-    links.new(node_diffuse.outputs[0], node_mix.inputs[1])
-    links.new(node_transparent.outputs[0], node_mix.inputs[2])
-    links.new(node_mix.outputs[0], node_mix_backface.inputs[1])
-
-    links.new(node_emission.outputs[0], node_mix_mouth.inputs[1])
-    links.new(node_transparent.outputs[0], node_mix_mouth.inputs[2])
-    links.new(node_mix_mouth.outputs[0], node_mix_backface.inputs[2])
-
-    links.new(node_mix_backface.outputs[0], node_output.inputs[0])
-    return
-
-
-##################################################
-
-
-def setup_scene():
-    global render
-    global cycles_gpu
-    global quality_preview
-    global resolution_x
-    global resolution_y
-    global shadows
-    global wireframe
-    global line_thickness
-    global compositor_background_image
-
-    # Remove default cube
-    if 'Cube' in bpy.data.objects:
-        bpy.data.objects['Cube'].select_set(True)
-        bpy.ops.object.delete()
-
-    scene = bpy.data.scenes['Scene']
-
-    # Setup render engine
-    if render == 'cycles':
-        scene.render.engine = 'CYCLES'
-    else:
-        scene.render.engine = 'BLENDER_EEVEE'
-
-    scene.render.resolution_x = resolution_x
-    scene.render.resolution_y = resolution_y
-    scene.render.resolution_percentage = 100
-    scene.render.film_transparent = True
-    if quality_preview:
-        scene.cycles.samples = samples_preview
-    else:
-        scene.cycles.samples = samples_final
-
-    # Setup Cycles CUDA GPU acceleration if requested
-    if render == 'cycles':
-        if cycles_gpu:
-            print('Activating GPU acceleration')
-            bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
-
-            if bpy.app.version[0] >= 3:
-                cuda_devices = bpy.context.preferences.addons[
-                    'cycles'].preferences.get_devices_for_type(compute_device_type='CUDA')
-            else:
-                (cuda_devices, opencl_devices
-                ) = bpy.context.preferences.addons['cycles'].preferences.get_devices()
-
-            if (len(cuda_devices) < 1):
-                print('ERROR: CUDA GPU acceleration not available')
-                sys.exit(1)
-
-            for cuda_device in cuda_devices:
-                if cuda_device.type == 'CUDA':
-                    cuda_device.use = True
-                    print('Using CUDA device: ' + str(cuda_device.name))
-                else:
-                    cuda_device.use = False
-                    print('Igoring CUDA device: ' + str(cuda_device.name))
-
-            scene.cycles.device = 'GPU'
-            if bpy.app.version[0] < 3:
-                scene.render.tile_x = 256
-                scene.render.tile_y = 256
-        else:
-            scene.cycles.device = 'CPU'
-            if bpy.app.version[0] < 3:
-                scene.render.tile_x = 64
-                scene.render.tile_y = 64
-
-    # Disable Blender 3 denoiser to properly measure Cycles render speed
-    if bpy.app.version[0] >= 3:
-        scene.cycles.use_denoising = False
-
-    # Setup camera
-    camera = bpy.data.objects['Camera']
-    camera.location = (0.0, -3, 1.8)
-    camera.rotation_euler = (radians(74), 0.0, 0)
-    bpy.data.cameras['Camera'].lens = 55
-
-    # Setup light
-
-    # Setup lights
-    light = bpy.data.objects['Light']
-    light.location = (-2, -3.0, 0.0)
-    light.rotation_euler = (radians(90.0), 0.0, 0.0)
-    bpy.data.lights['Light'].type = 'POINT'
-    bpy.data.lights['Light'].energy = 2
-    light.data.cycles.cast_shadow = False
-
-    if 'Sun' not in bpy.data.objects:
-        bpy.ops.object.light_add(type='SUN')
-        light_sun = bpy.context.active_object
-        light_sun.location = (0.0, -3, 0.0)
-        light_sun.rotation_euler = (radians(45.0), 0.0, radians(30))
-        bpy.data.lights['Sun'].energy = 2
-        light_sun.data.cycles.cast_shadow = shadows
-    else:
-        light_sun = bpy.data.objects['Sun']
-
-    if shadows:
-        # Setup shadow catcher
-        bpy.ops.mesh.primitive_plane_add()
-        plane = bpy.context.active_object
-        plane.scale = (5.0, 5.0, 1)
-
-        plane.cycles.is_shadow_catcher = True
-
-        # Exclude plane from diffuse cycles contribution to avoid bright pixel noise in body rendering
-        # plane.cycles_visibility.diffuse = False
-
-        if wireframe:
-            # Unmark freestyle edges
-            bpy.ops.object.mode_set(mode='EDIT')
-            bpy.ops.mesh.mark_freestyle_edge(clear=True)
-            bpy.ops.object.mode_set(mode='OBJECT')
-
-    # Setup freestyle mode for wireframe overlay rendering
-    if wireframe:
-        scene.render.use_freestyle = True
-        scene.render.line_thickness = line_thickness
-        bpy.context.view_layer.freestyle_settings.linesets[0].select_edge_mark = True
-
-        # Disable border edges so that we don't see contour of shadow catcher plane
-        bpy.context.view_layer.freestyle_settings.linesets[0].select_border = False
-    else:
-        scene.render.use_freestyle = False
-
-    if compositor_background_image:
-        # Setup compositing when using background image
-        setup_compositing()
-    else:
-        # Output transparent image when no background is used
-        scene.render.image_settings.color_mode = 'RGBA'
-
-
-##################################################
-
-
-def setup_compositing():
-
-    global compositor_image_scale
-    global compositor_alpha
-
-    # Node editor compositing setup
-    bpy.context.scene.use_nodes = True
-    tree = bpy.context.scene.node_tree
-
-    # Create input image node
-    image_node = tree.nodes.new(type='CompositorNodeImage')
-
-    scale_node = tree.nodes.new(type='CompositorNodeScale')
-    scale_node.inputs[1].default_value = compositor_image_scale
-    scale_node.inputs[2].default_value = compositor_image_scale
-
-    blend_node = tree.nodes.new(type='CompositorNodeAlphaOver')
-    blend_node.inputs[0].default_value = compositor_alpha
-
-    # Link nodes
-    links = tree.links
-    links.new(image_node.outputs[0], scale_node.inputs[0])
-
-    links.new(scale_node.outputs[0], blend_node.inputs[1])
-    links.new(tree.nodes['Render Layers'].outputs[0], blend_node.inputs[2])
-
-    links.new(blend_node.outputs[0], tree.nodes['Composite'].inputs[0])
-
-
-def render_file(input_file, input_dir, output_file, output_dir, yaw, correct):
-    '''Render image of given model file'''
-    global smooth
-    global object_transparent
-    global mouth_transparent
-    global compositor_background_image
-    global quads
-
-    path = input_dir + input_file
-
-    # Import object into scene
-    bpy.ops.import_scene.obj(filepath=path)
-    object = bpy.context.selected_objects[0]
-
-    object.rotation_euler = (radians(90.0), 0.0, radians(yaw))
-    z_bottom = np.min(np.array([vert.co for vert in object.data.vertices])[:, 1])
-    # z_top = np.max(np.array([vert.co for vert in object.data.vertices])[:,1])
-    # blender_print(radians(90.0), z_bottom, z_top)
-    object.location -= mathutils.Vector((0.0, 0.0, z_bottom))
-
-    if quads:
-        bpy.context.view_layer.objects.active = object
-        bpy.ops.object.mode_set(mode='EDIT')
-        bpy.ops.mesh.tris_convert_to_quads()
-        bpy.ops.object.mode_set(mode='OBJECT')
-
-    if smooth:
-        bpy.ops.object.shade_smooth()
-
-    # Mark freestyle edges
-    bpy.context.view_layer.objects.active = object
-    bpy.ops.object.mode_set(mode='EDIT')
-    bpy.ops.mesh.mark_freestyle_edge(clear=False)
-    bpy.ops.object.mode_set(mode='OBJECT')
-
-    if correct:
-        diffuse_color = (18 / 255., 139 / 255., 142 / 255., 1)    #correct
-    else:
-        diffuse_color = (251 / 255., 60 / 255., 60 / 255., 1)    #wrong
-
-    setup_diffuse_transparent_material(object, diffuse_color, object_transparent, mouth_transparent)
-
-    if compositor_background_image:
-        # Set background image
-        image_path = input_dir + input_file.replace('.obj', '_original.png')
-        bpy.context.scene.node_tree.nodes['Image'].image = bpy.data.images.load(image_path)
-
-    # Render
-    bpy.context.scene.render.filepath = os.path.join(output_dir, output_file)
-
-    # Silence console output of bpy.ops.render.render by redirecting stdout to file
-    # Note: Does not actually write the output to file (Windows 7)
-    sys.stdout.flush()
-    old = os.dup(1)
-    os.close(1)
-    os.open('blender_render.log', os.O_WRONLY | os.O_CREAT)
-
-    # Render
-    bpy.ops.render.render(write_still=True)
-
-    # Remove temporary output redirection
-    # sys.stdout.flush()
-    # os.close(1)
-    # os.dup(old)
-    # os.close(old)
-
-    # Delete last selected object from scene
-    object.select_set(True)
-    bpy.ops.object.delete()
-
-
-def process_file(input_file, input_dir, output_file, output_dir, correct=True):
-    global views
-    global quality_preview
-
-    if not input_file.endswith('.obj'):
-        print('ERROR: Invalid input: ' + input_file)
-        return
-
-    print('Processing: ' + input_file)
-    if output_file == '':
-        output_file = input_file[:-4]
-
-    if quality_preview:
-        output_file = output_file.replace('.png', '-preview.png')
-
-    angle = 360.0 / views
-    pbar = tqdm(range(0, views))
-    for view in pbar:
-        pbar.set_description(f"{os.path.basename(output_file)} | View:{str(view)}")
-        yaw = view * angle
-        output_file_view = f"{output_file}/{view:03d}.png"
-        if not os.path.exists(os.path.join(output_dir, output_file_view)):
-            render_file(input_file, input_dir, output_file_view, output_dir, yaw, correct)
-
-    cmd = "ffmpeg -loglevel quiet -r 30 -f lavfi -i color=c=white:s=512x512 -i " + os.path.join(output_dir, output_file, '%3d.png') + \
cmd = "ffmpeg -loglevel quiet -r 30 -f lavfi -i color=c=white:s=512x512 -i " + os.path.join(output_dir, output_file, '%3d.png') + \
|
| 382 |
-
" -shortest -filter_complex \"[0:v][1:v]overlay=shortest=1,format=yuv420p[out]\" -map \"[out]\" -y " + output_dir+"/"+output_file+".mp4"
|
| 383 |
-
os.system(cmd)
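
Note: the deleted render_file() silences Blender's render chatter by pointing file descriptor 1 at blender_render.log, but the matching restore stayed commented out, so stdout remains redirected for the rest of the process. A minimal sketch of a paired redirect/restore built from the same os calls (function names here are hypothetical, not from the codebase):

    import os
    import sys

    def silence_stdout(logfile="blender_render.log"):
        """Point C-level stdout (fd 1) at a log file; return the saved fd."""
        sys.stdout.flush()
        saved = os.dup(1)                             # keep a copy of the real stdout
        os.close(1)
        os.open(logfile, os.O_WRONLY | os.O_CREAT)    # lowest free fd is now 1
        return saved

    def restore_stdout(saved):
        """Undo silence_stdout() by putting the saved fd back on fd 1."""
        sys.stdout.flush()
        os.close(1)
        os.dup(saved)                                 # dup() picks fd 1 again
        os.close(saved)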
lib/common/cloth_extraction.py
CHANGED

@@ -1,10 +1,11 @@
-import numpy as np
+import itertools
 import json
 import os
-import itertools
+from collections import Counter
+
+import numpy as np
 import trimesh
 from matplotlib.path import Path
-from collections import Counter
 from sklearn.neighbors import KNeighborsClassifier


@@ -36,13 +37,11 @@ def load_segmentation(path, shape):
         xy = np.vstack((x, y)).T
         coordinates.append(xy)

-        segmentations.append(
-            {
-                "type": val["category_name"],
-                "type_id": val["category_id"],
-                "coordinates": coordinates,
-            }
-        )
+        segmentations.append({
+            "type": val["category_name"],
+            "type_id": val["category_id"],
+            "coordinates": coordinates,
+        })

     return segmentations
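
Note: load_segmentation() now appends one dict literal per annotation. A minimal sketch of the resulting entry shape and a typical filter (the category name/id below are made-up placeholders, not values from this codebase):

    import numpy as np

    # One entry in the shape load_segmentation() produces.
    seg = {
        "type": "short sleeve top",
        "type_id": 1,
        "coordinates": [np.array([[0.1, 0.2], [0.4, 0.2], [0.3, 0.6]])],    # (N, 2) polygons
    }
    tops = [s for s in [seg] if s["type_id"] == 1]
    print(len(tops))    # -> 1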
lib/common/config.py
CHANGED

@@ -14,9 +14,10 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-from yacs.config import CfgNode as CN
 import os

+from yacs.config import CfgNode as CN
+
 _C = CN(new_allowed=True)

 # needed by trainer
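
Note: the import move is cosmetic; behavior is unchanged. For reference, a minimal sketch of the yacs pattern this module relies on (the key name is a placeholder):

    from yacs.config import CfgNode as CN

    _C = CN(new_allowed=True)              # new_allowed: keys may be added on the fly
    _C.name = "default"
    _C.merge_from_list(["name", "econ"])   # CLI-style flat key/value overrides
    print(_C.name)                         # -> econ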
lib/common/imutils.py
CHANGED

@@ -1,17 +1,18 @@
 import os
-
+
+os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
 import cv2
 import mediapipe as mp
-import torch
 import numpy as np
+import torch
 import torch.nn.functional as F
+from kornia.geometry.transform import get_affine_matrix2d, warp_affine
 from PIL import Image
-from lib.pymafx.core import constants
-
 from rembg import remove
 from rembg.session_factory import new_session
 from torchvision import transforms
-
+
+from lib.pymafx.core import constants


 def transform_to_tensor(res, mean=None, std=None, is_tensor=False):

@@ -40,13 +41,14 @@ def get_affine_matrix_box(boxes, w2, h2):
     # boxes [left, top, right, bottom]
     width = boxes[:, 2] - boxes[:, 0]    #(N,)
     height = boxes[:, 3] - boxes[:, 1]    #(N,)
-    center = torch.tensor(
-        [(boxes[:, 0] + boxes[:, 2]) / 2.0, (boxes[:, 1] + boxes[:, 3]) / 2.0]
-    ).T    #(N,2)
+    center = torch.tensor([(boxes[:, 0] + boxes[:, 2]) / 2.0,
+                           (boxes[:, 1] + boxes[:, 3]) / 2.0]).T    #(N,2)
     scale = torch.min(torch.tensor([w2 / width, h2 / height]),
                       dim=0)[0].unsqueeze(1).repeat(1, 2) * 0.9    #(N,2)
-    transl = torch.cat([w2 / 2.0 - center[:, 0:1], h2 / 2.0 - center[:, 1:2]], dim=1)
-    M = get_affine_matrix2d(transl, center, scale, angle=torch.tensor([0.,] * transl.shape[0]))
+    transl = torch.cat([w2 / 2.0 - center[:, 0:1], h2 / 2.0 - center[:, 1:2]], dim=1)    #(N,2)
+    M = get_affine_matrix2d(transl, center, scale, angle=torch.tensor([
+        0.,
+    ] * transl.shape[0]))

     return M

@@ -54,12 +56,12 @@ def get_affine_matrix_box(boxes, w2, h2):
 def load_img(img_file):

     if img_file.endswith("exr"):
-        img = cv2.imread(img_file, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
-    else:
+        img = cv2.imread(img_file, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
+    else:
         img = cv2.imread(img_file, cv2.IMREAD_UNCHANGED)

     # considering non 8-bit image
-    if img.dtype != np.uint8:
+    if img.dtype != np.uint8:
         img = cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)

     if len(img.shape) == 2:

@@ -112,8 +114,8 @@ def get_pymafx(image, landmarks):
     # image [3,512,512]

     item = {
-        'img_body':
-            F.interpolate(image.unsqueeze(0), size=224, mode='bicubic', align_corners=True)[0]
+        'img_body': F.interpolate(image.unsqueeze(0), size=224, mode='bicubic',
+                                  align_corners=True)[0]
     }

     for part in ['lhand', 'rhand', 'face']:

@@ -211,11 +213,8 @@ def process_image(img_file, hps_type, single, input_res, detector):
     img_pymafx_lst = []

     uncrop_param = {
-        "ori_shape": [in_height, in_width],
-        "box_shape": [input_res, input_res],
-        "square_shape": [tgt_res, tgt_res],
-        "M_square": M_square,
-        "M_crop": M_crop
+        "ori_shape": [in_height, in_width], "box_shape": [input_res, input_res], "square_shape":
+        [tgt_res, tgt_res], "M_square": M_square, "M_crop": M_crop
     }

     for idx in range(len(boxes)):

@@ -226,11 +225,11 @@ def process_image(img_file, hps_type, single, input_res, detector):
     else:
         mask_detection = masks[0] * 0.

-    img_square_rgba = torch.cat(
-        [img_square.squeeze(0).permute(1, 2, 0),
-         torch.tensor(mask_detection < 0.4) * 255],
-        dim=2
-    )
+    img_square_rgba = torch.cat([
+        img_square.squeeze(0).permute(1, 2, 0),
+        torch.tensor(mask_detection < 0.4) * 255
+    ],
+                                dim=2)

     img_crop = warp_affine(
         img_square_rgba.unsqueeze(0).permute(0, 3, 1, 2),
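
Note: with kornia's get_affine_matrix2d now imported explicitly, the intent of get_affine_matrix_box() is easy to check in isolation: center the detection box, scale it to fit the target crop, and translate it to the crop center. A minimal single-box sketch (the 512-pixel target size is an assumed example):

    import torch
    from kornia.geometry.transform import get_affine_matrix2d

    boxes = torch.tensor([[100., 50., 300., 450.]])    # [left, top, right, bottom]
    w2 = h2 = 512
    width, height = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
    center = torch.stack([(boxes[:, 0] + boxes[:, 2]) / 2.0,
                          (boxes[:, 1] + boxes[:, 3]) / 2.0], dim=1)          # (N, 2)
    scale = torch.min(w2 / width, h2 / height).unsqueeze(1).repeat(1, 2) * 0.9    # (N, 2)
    transl = torch.cat([w2 / 2.0 - center[:, 0:1], h2 / 2.0 - center[:, 1:2]], dim=1)
    M = get_affine_matrix2d(transl, center, scale, angle=torch.zeros(len(boxes)))
    print(M.shape)    # -> torch.Size([1, 3, 3])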
lib/common/libmesh/inside_mesh.py
CHANGED

@@ -1,4 +1,5 @@
 import numpy as np
+
 from .triangle_hash import TriangleHash as _TriangleHash


@@ -147,8 +148,6 @@ class TriangleIntersector2d:
     v = (-A[:, 1, 0] * y[:, 0] + A[:, 0, 0] * y[:, 1]) * s_detA

     sum_uv = u + v
-    contains[mask] = (
-        (0 < u) & (u < abs_detA) & (0 < v) & (v < abs_detA) & (0 < sum_uv) &
-        (sum_uv < abs_detA)
-    )
+    contains[mask] = ((0 < u) & (u < abs_detA) & (0 < v) & (v < abs_detA) & (0 < sum_uv) &
+                      (sum_uv < abs_detA))
     return contains
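
Note: the reflowed condition is the standard barycentric point-in-triangle test, kept in un-normalized form (scaled by |det A| so no division is needed). A minimal standalone sketch of the same test for one point:

    import numpy as np

    v0, v1, v2 = np.array([0., 0.]), np.array([1., 0.]), np.array([0., 1.])
    p = np.array([0.25, 0.25])
    A = np.stack([v1 - v0, v2 - v0], axis=1)    # 2x2 matrix of edge vectors
    u, v = np.linalg.solve(A, p - v0)           # barycentric coordinates
    inside = (0 < u) and (0 < v) and (u + v < 1)
    print(inside)    # -> True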
lib/common/libmesh/setup.py
CHANGED

@@ -1,5 +1,5 @@
+import numpy
 from setuptools import setup
 from Cython.Build import cythonize
-import numpy

 setup(name='libmesh', ext_modules=cythonize("*.pyx"), include_dirs=[numpy.get_include()])
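
Note: the extension still builds the usual Cython way, e.g. run `python setup.py build_ext --inplace` inside lib/common/libmesh/, which regenerates and compiles triangle_hash.cpp below.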
lib/common/libmesh/triangle_hash.cpp
CHANGED

[Regenerated Cython artifact: `#include <math.h>` moves ahead of the C++ standard headers, the 'cython.view' / 'cython' / 'libc.math' module-declaration blocks are reordered, and every `/* "triangle_hash.pyx":N */` source comment and `__PYX_ERR(0, N, ...)` line reference is renumbered to track the reordered imports in triangle_hash.pyx; no functional change.]
|
| 3177 |
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
|
| 3178 |
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
|
| 3179 |
+
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 53, __pyx_L1_error)
|
| 3180 |
__Pyx_GOTREF(__pyx_t_2);
|
| 3181 |
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
|
| 3182 |
__pyx_r = __pyx_t_2;
|
|
|
|
| 3197 |
#endif
|
| 3198 |
}
|
| 3199 |
|
| 3200 |
+
/* "triangle_hash.pyx":54
|
| 3201 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 3202 |
* cpdef query(self, double[:, :] points):
|
| 3203 |
* assert(points.shape[1] == 2) # <<<<<<<<<<<<<<
|
|
|
|
| 3208 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 3209 |
if (unlikely(!(((__pyx_v_points.shape[1]) == 2) != 0))) {
|
| 3210 |
PyErr_SetNone(PyExc_AssertionError);
|
| 3211 |
+
__PYX_ERR(0, 54, __pyx_L1_error)
|
| 3212 |
}
|
| 3213 |
}
|
| 3214 |
#endif
|
| 3215 |
|
| 3216 |
+
/* "triangle_hash.pyx":55
|
| 3217 |
* cpdef query(self, double[:, :] points):
|
| 3218 |
* assert(points.shape[1] == 2)
|
| 3219 |
* cdef int n_points = points.shape[0] # <<<<<<<<<<<<<<
|
|
|
|
| 3222 |
*/
|
| 3223 |
__pyx_v_n_points = (__pyx_v_points.shape[0]);
|
| 3224 |
|
| 3225 |
+
/* "triangle_hash.pyx":65
|
| 3226 |
* cdef int spatial_idx
|
| 3227 |
*
|
| 3228 |
* for i_point in range(n_points): # <<<<<<<<<<<<<<
|
|
|
|
| 3234 |
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
|
| 3235 |
__pyx_v_i_point = __pyx_t_8;
|
| 3236 |
|
| 3237 |
+
/* "triangle_hash.pyx":66
|
| 3238 |
*
|
| 3239 |
* for i_point in range(n_points):
|
| 3240 |
* x = int(points[i_point, 0]) # <<<<<<<<<<<<<<
|
|
|
|
| 3245 |
__pyx_t_10 = 0;
|
| 3246 |
__pyx_v_x = ((int)(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_points.data + __pyx_t_9 * __pyx_v_points.strides[0]) ) + __pyx_t_10 * __pyx_v_points.strides[1]) ))));
|
| 3247 |
|
| 3248 |
+
/* "triangle_hash.pyx":67
|
| 3249 |
* for i_point in range(n_points):
|
| 3250 |
* x = int(points[i_point, 0])
|
| 3251 |
* y = int(points[i_point, 1]) # <<<<<<<<<<<<<<
|
|
|
|
| 3256 |
__pyx_t_9 = 1;
|
| 3257 |
__pyx_v_y = ((int)(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_points.data + __pyx_t_10 * __pyx_v_points.strides[0]) ) + __pyx_t_9 * __pyx_v_points.strides[1]) ))));
|
| 3258 |
|
| 3259 |
+
/* "triangle_hash.pyx":68
|
| 3260 |
* x = int(points[i_point, 0])
|
| 3261 |
* y = int(points[i_point, 1])
|
| 3262 |
* if not (0 <= x < self.resolution and 0 <= y < self.resolution): # <<<<<<<<<<<<<<
|
|
|
|
| 3283 |
__pyx_t_12 = ((!__pyx_t_11) != 0);
|
| 3284 |
if (__pyx_t_12) {
|
| 3285 |
|
| 3286 |
+
/* "triangle_hash.pyx":69
|
| 3287 |
* y = int(points[i_point, 1])
|
| 3288 |
* if not (0 <= x < self.resolution and 0 <= y < self.resolution):
|
| 3289 |
* continue # <<<<<<<<<<<<<<
|
|
|
|
| 3292 |
*/
|
| 3293 |
goto __pyx_L3_continue;
|
| 3294 |
|
| 3295 |
+
/* "triangle_hash.pyx":68
|
| 3296 |
* x = int(points[i_point, 0])
|
| 3297 |
* y = int(points[i_point, 1])
|
| 3298 |
* if not (0 <= x < self.resolution and 0 <= y < self.resolution): # <<<<<<<<<<<<<<
|
|
|
|
| 3301 |
*/
|
| 3302 |
}
|
| 3303 |
|
| 3304 |
+
/* "triangle_hash.pyx":71
|
| 3305 |
* continue
|
| 3306 |
*
|
| 3307 |
* spatial_idx = self.resolution * x + y # <<<<<<<<<<<<<<
|
|
|
|
| 3310 |
*/
|
| 3311 |
__pyx_v_spatial_idx = ((__pyx_v_self->resolution * __pyx_v_x) + __pyx_v_y);
|
| 3312 |
|
| 3313 |
+
/* "triangle_hash.pyx":72
|
| 3314 |
*
|
| 3315 |
* spatial_idx = self.resolution * x + y
|
| 3316 |
* for i_tri in self.spatial_hash[spatial_idx]: # <<<<<<<<<<<<<<
|
|
|
|
| 3325 |
++__pyx_t_14;
|
| 3326 |
__pyx_v_i_tri = __pyx_t_16;
|
| 3327 |
|
| 3328 |
+
/* "triangle_hash.pyx":73
|
| 3329 |
* spatial_idx = self.resolution * x + y
|
| 3330 |
* for i_tri in self.spatial_hash[spatial_idx]:
|
| 3331 |
* points_indices.push_back(i_point) # <<<<<<<<<<<<<<
|
|
|
|
| 3336 |
__pyx_v_points_indices.push_back(__pyx_v_i_point);
|
| 3337 |
} catch(...) {
|
| 3338 |
__Pyx_CppExn2PyErr();
|
| 3339 |
+
__PYX_ERR(0, 73, __pyx_L1_error)
|
| 3340 |
}
|
| 3341 |
|
| 3342 |
+
/* "triangle_hash.pyx":74
|
| 3343 |
* for i_tri in self.spatial_hash[spatial_idx]:
|
| 3344 |
* points_indices.push_back(i_point)
|
| 3345 |
* tri_indices.push_back(i_tri) # <<<<<<<<<<<<<<
|
|
|
|
| 3350 |
__pyx_v_tri_indices.push_back(__pyx_v_i_tri);
|
| 3351 |
} catch(...) {
|
| 3352 |
__Pyx_CppExn2PyErr();
|
| 3353 |
+
__PYX_ERR(0, 74, __pyx_L1_error)
|
| 3354 |
}
|
| 3355 |
|
| 3356 |
+
/* "triangle_hash.pyx":72
|
| 3357 |
*
|
| 3358 |
* spatial_idx = self.resolution * x + y
|
| 3359 |
* for i_tri in self.spatial_hash[spatial_idx]: # <<<<<<<<<<<<<<
|
|
|
|
| 3364 |
__pyx_L3_continue:;
|
| 3365 |
}
|
| 3366 |
|
| 3367 |
+
/* "triangle_hash.pyx":76
|
| 3368 |
* tri_indices.push_back(i_tri)
|
| 3369 |
*
|
| 3370 |
* points_indices_np = np.zeros(points_indices.size(), dtype=np.int32) # <<<<<<<<<<<<<<
|
| 3371 |
* tri_indices_np = np.zeros(tri_indices.size(), dtype=np.int32)
|
| 3372 |
*
|
| 3373 |
*/
|
| 3374 |
+
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3375 |
__Pyx_GOTREF(__pyx_t_1);
|
| 3376 |
+
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3377 |
__Pyx_GOTREF(__pyx_t_2);
|
| 3378 |
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
|
| 3379 |
+
__pyx_t_1 = __Pyx_PyInt_FromSize_t(__pyx_v_points_indices.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3380 |
__Pyx_GOTREF(__pyx_t_1);
|
| 3381 |
+
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3382 |
__Pyx_GOTREF(__pyx_t_4);
|
| 3383 |
__Pyx_GIVEREF(__pyx_t_1);
|
| 3384 |
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
|
| 3385 |
__pyx_t_1 = 0;
|
| 3386 |
+
__pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3387 |
__Pyx_GOTREF(__pyx_t_1);
|
| 3388 |
+
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3389 |
__Pyx_GOTREF(__pyx_t_3);
|
| 3390 |
+
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3391 |
__Pyx_GOTREF(__pyx_t_5);
|
| 3392 |
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
|
| 3393 |
+
if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3394 |
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
|
| 3395 |
+
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error)
|
| 3396 |
__Pyx_GOTREF(__pyx_t_5);
|
| 3397 |
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
|
| 3398 |
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
|
|
|
|
| 3400 |
__pyx_v_points_indices_np = __pyx_t_5;
|
| 3401 |
__pyx_t_5 = 0;
|
| 3402 |
|
| 3403 |
+
/* "triangle_hash.pyx":77
|
| 3404 |
*
|
| 3405 |
* points_indices_np = np.zeros(points_indices.size(), dtype=np.int32)
|
| 3406 |
* tri_indices_np = np.zeros(tri_indices.size(), dtype=np.int32) # <<<<<<<<<<<<<<
|
| 3407 |
*
|
| 3408 |
* cdef int[:] points_indices_view = points_indices_np
|
| 3409 |
*/
|
| 3410 |
+
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3411 |
__Pyx_GOTREF(__pyx_t_5);
|
| 3412 |
+
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_zeros); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3413 |
__Pyx_GOTREF(__pyx_t_1);
|
| 3414 |
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
|
| 3415 |
+
__pyx_t_5 = __Pyx_PyInt_FromSize_t(__pyx_v_tri_indices.size()); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3416 |
__Pyx_GOTREF(__pyx_t_5);
|
| 3417 |
+
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3418 |
__Pyx_GOTREF(__pyx_t_4);
|
| 3419 |
__Pyx_GIVEREF(__pyx_t_5);
|
| 3420 |
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
|
| 3421 |
__pyx_t_5 = 0;
|
| 3422 |
+
__pyx_t_5 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3423 |
__Pyx_GOTREF(__pyx_t_5);
|
| 3424 |
+
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3425 |
__Pyx_GOTREF(__pyx_t_2);
|
| 3426 |
+
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_int32); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3427 |
__Pyx_GOTREF(__pyx_t_3);
|
| 3428 |
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
|
| 3429 |
+
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_dtype, __pyx_t_3) < 0) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3430 |
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
|
| 3431 |
+
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error)
|
| 3432 |
__Pyx_GOTREF(__pyx_t_3);
|
| 3433 |
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
|
| 3434 |
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
|
|
|
|
| 3436 |
__pyx_v_tri_indices_np = __pyx_t_3;
|
| 3437 |
__pyx_t_3 = 0;
|
| 3438 |
|
| 3439 |
+
/* "triangle_hash.pyx":79
|
| 3440 |
* tri_indices_np = np.zeros(tri_indices.size(), dtype=np.int32)
|
| 3441 |
*
|
| 3442 |
* cdef int[:] points_indices_view = points_indices_np # <<<<<<<<<<<<<<
|
| 3443 |
* cdef int[:] tri_indices_view = tri_indices_np
|
| 3444 |
*
|
| 3445 |
*/
|
| 3446 |
+
__pyx_t_17 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_v_points_indices_np, PyBUF_WRITABLE); if (unlikely(!__pyx_t_17.memview)) __PYX_ERR(0, 79, __pyx_L1_error)
|
| 3447 |
__pyx_v_points_indices_view = __pyx_t_17;
|
| 3448 |
__pyx_t_17.memview = NULL;
|
| 3449 |
__pyx_t_17.data = NULL;
|
| 3450 |
|
| 3451 |
+
/* "triangle_hash.pyx":80
|
| 3452 |
*
|
| 3453 |
* cdef int[:] points_indices_view = points_indices_np
|
| 3454 |
* cdef int[:] tri_indices_view = tri_indices_np # <<<<<<<<<<<<<<
|
| 3455 |
*
|
| 3456 |
* for k in range(points_indices.size()):
|
| 3457 |
*/
|
| 3458 |
+
__pyx_t_17 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(__pyx_v_tri_indices_np, PyBUF_WRITABLE); if (unlikely(!__pyx_t_17.memview)) __PYX_ERR(0, 80, __pyx_L1_error)
|
| 3459 |
__pyx_v_tri_indices_view = __pyx_t_17;
|
| 3460 |
__pyx_t_17.memview = NULL;
|
| 3461 |
__pyx_t_17.data = NULL;
|
| 3462 |
|
| 3463 |
+
/* "triangle_hash.pyx":82
|
| 3464 |
* cdef int[:] tri_indices_view = tri_indices_np
|
| 3465 |
*
|
| 3466 |
* for k in range(points_indices.size()): # <<<<<<<<<<<<<<
|
|
|
|
| 3472 |
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_19; __pyx_t_6+=1) {
|
| 3473 |
__pyx_v_k = __pyx_t_6;
|
| 3474 |
|
| 3475 |
+
/* "triangle_hash.pyx":83
|
| 3476 |
*
|
| 3477 |
* for k in range(points_indices.size()):
|
| 3478 |
* points_indices_view[k] = points_indices[k] # <<<<<<<<<<<<<<
|
|
|
|
| 3483 |
*((int *) ( /* dim=0 */ (__pyx_v_points_indices_view.data + __pyx_t_9 * __pyx_v_points_indices_view.strides[0]) )) = (__pyx_v_points_indices[__pyx_v_k]);
|
| 3484 |
}
|
| 3485 |
|
| 3486 |
+
/* "triangle_hash.pyx":85
|
| 3487 |
* points_indices_view[k] = points_indices[k]
|
| 3488 |
*
|
| 3489 |
* for k in range(tri_indices.size()): # <<<<<<<<<<<<<<
|
|
|
|
| 3495 |
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_19; __pyx_t_6+=1) {
|
| 3496 |
__pyx_v_k = __pyx_t_6;
|
| 3497 |
|
| 3498 |
+
/* "triangle_hash.pyx":86
|
| 3499 |
*
|
| 3500 |
* for k in range(tri_indices.size()):
|
| 3501 |
* tri_indices_view[k] = tri_indices[k] # <<<<<<<<<<<<<<
|
|
|
|
| 3506 |
*((int *) ( /* dim=0 */ (__pyx_v_tri_indices_view.data + __pyx_t_9 * __pyx_v_tri_indices_view.strides[0]) )) = (__pyx_v_tri_indices[__pyx_v_k]);
|
| 3507 |
}
|
| 3508 |
|
| 3509 |
+
/* "triangle_hash.pyx":88
|
| 3510 |
* tri_indices_view[k] = tri_indices[k]
|
| 3511 |
*
|
| 3512 |
* return points_indices_np, tri_indices_np # <<<<<<<<<<<<<<
|
| 3513 |
*/
|
| 3514 |
__Pyx_XDECREF(__pyx_r);
|
| 3515 |
+
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error)
|
| 3516 |
__Pyx_GOTREF(__pyx_t_3);
|
| 3517 |
__Pyx_INCREF(__pyx_v_points_indices_np);
|
| 3518 |
__Pyx_GIVEREF(__pyx_v_points_indices_np);
|
|
|
|
| 3524 |
__pyx_t_3 = 0;
|
| 3525 |
goto __pyx_L0;
|
| 3526 |
|
| 3527 |
+
/* "triangle_hash.pyx":53
|
| 3528 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 3529 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 3530 |
* cpdef query(self, double[:, :] points): # <<<<<<<<<<<<<<
|
|
|
|
| 3563 |
__Pyx_RefNannyDeclarations
|
| 3564 |
__Pyx_RefNannySetupContext("query (wrapper)", 0);
|
| 3565 |
assert(__pyx_arg_points); {
|
| 3566 |
+
__pyx_v_points = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(__pyx_arg_points, PyBUF_WRITABLE); if (unlikely(!__pyx_v_points.memview)) __PYX_ERR(0, 53, __pyx_L3_error)
|
| 3567 |
}
|
| 3568 |
goto __pyx_L4_argument_unpacking_done;
|
| 3569 |
__pyx_L3_error:;
|
|
|
|
| 3587 |
int __pyx_clineno = 0;
|
| 3588 |
__Pyx_RefNannySetupContext("query", 0);
|
| 3589 |
__Pyx_XDECREF(__pyx_r);
|
| 3590 |
+
if (unlikely(!__pyx_v_points.memview)) { __Pyx_RaiseUnboundLocalError("points"); __PYX_ERR(0, 53, __pyx_L1_error) }
|
| 3591 |
+
__pyx_t_1 = __pyx_f_13triangle_hash_12TriangleHash_query(__pyx_v_self, __pyx_v_points, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
|
| 3592 |
__Pyx_GOTREF(__pyx_t_1);
|
| 3593 |
__pyx_r = __pyx_t_1;
|
| 3594 |
__pyx_t_1 = 0;
|
|
|
|
| 18738 |
{0, 0, 0, 0, 0, 0, 0}
|
| 18739 |
};
|
| 18740 |
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
|
| 18741 |
+
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 33, __pyx_L1_error)
|
| 18742 |
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
|
| 18743 |
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(2, 945, __pyx_L1_error)
|
| 18744 |
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
|
|
|
|
| 19118 |
__pyx_vtabptr_13triangle_hash_TriangleHash = &__pyx_vtable_13triangle_hash_TriangleHash;
|
| 19119 |
__pyx_vtable_13triangle_hash_TriangleHash._build_hash = (int (*)(struct __pyx_obj_13triangle_hash_TriangleHash *, __Pyx_memviewslice))__pyx_f_13triangle_hash_12TriangleHash__build_hash;
|
| 19120 |
__pyx_vtable_13triangle_hash_TriangleHash.query = (PyObject *(*)(struct __pyx_obj_13triangle_hash_TriangleHash *, __Pyx_memviewslice, int __pyx_skip_dispatch))__pyx_f_13triangle_hash_12TriangleHash_query;
|
| 19121 |
+
if (PyType_Ready(&__pyx_type_13triangle_hash_TriangleHash) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
|
| 19122 |
#if PY_VERSION_HEX < 0x030800B1
|
| 19123 |
__pyx_type_13triangle_hash_TriangleHash.tp_print = 0;
|
| 19124 |
#endif
|
| 19125 |
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_13triangle_hash_TriangleHash.tp_dictoffset && __pyx_type_13triangle_hash_TriangleHash.tp_getattro == PyObject_GenericGetAttr)) {
|
| 19126 |
__pyx_type_13triangle_hash_TriangleHash.tp_getattro = __Pyx_PyObject_GenericGetAttr;
|
| 19127 |
}
|
| 19128 |
+
if (__Pyx_SetVtable(__pyx_type_13triangle_hash_TriangleHash.tp_dict, __pyx_vtabptr_13triangle_hash_TriangleHash) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
|
| 19129 |
+
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_TriangleHash, (PyObject *)&__pyx_type_13triangle_hash_TriangleHash) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
|
| 19130 |
+
if (__Pyx_setup_reduce((PyObject*)&__pyx_type_13triangle_hash_TriangleHash) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
|
| 19131 |
__pyx_ptype_13triangle_hash_TriangleHash = &__pyx_type_13triangle_hash_TriangleHash;
|
| 19132 |
__pyx_vtabptr_array = &__pyx_vtable_array;
|
| 19133 |
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
|
|
|
|
| 19468 |
*
|
| 19469 |
* # distutils: language=c++
|
| 19470 |
* import numpy as np # <<<<<<<<<<<<<<
|
| 19471 |
+
*
|
| 19472 |
* cimport cython
|
| 19473 |
*/
|
| 19474 |
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error)
|
|
|
|
| 19480 |
*
|
| 19481 |
* # distutils: language=c++ # <<<<<<<<<<<<<<
|
| 19482 |
* import numpy as np
|
| 19483 |
+
*
|
| 19484 |
*/
|
| 19485 |
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
|
| 19486 |
__Pyx_GOTREF(__pyx_t_1);
|
lib/common/libmesh/triangle_hash.pyx
CHANGED
|
@@ -1,10 +1,12 @@
|
|
| 1 |
|
| 2 |
# distutils: language=c++
|
| 3 |
import numpy as np
|
| 4 |
-
|
| 5 |
cimport cython
|
|
|
|
|
|
|
| 6 |
from libcpp.vector cimport vector
|
| 7 |
-
|
| 8 |
|
| 9 |
cdef class TriangleHash:
|
| 10 |
cdef vector[vector[int]] spatial_hash
|
|
|
|
| 1 |
|
| 2 |
# distutils: language=c++
|
| 3 |
import numpy as np
|
| 4 |
+
|
| 5 |
cimport cython
|
| 6 |
+
cimport numpy as np
|
| 7 |
+
from libc.math cimport ceil, floor
|
| 8 |
from libcpp.vector cimport vector
|
| 9 |
+
|
| 10 |
|
| 11 |
cdef class TriangleHash:
|
| 12 |
cdef vector[vector[int]] spatial_hash
|
lib/common/libvoxelize/voxelize.c
CHANGED
|
@@ -2115,7 +2115,7 @@ static PyObject *__pyx_tuple__24;
|
|
| 2115 |
static PyObject *__pyx_codeobj__25;
|
| 2116 |
/* Late includes */
|
| 2117 |
|
| 2118 |
-
/* "voxelize.pyx":
|
| 2119 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2120 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2121 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces): # <<<<<<<<<<<<<<
|
|
@@ -2138,7 +2138,7 @@ static int __pyx_f_8voxelize_voxelize_mesh_(__Pyx_memviewslice __pyx_v_occ, __Py
|
|
| 2138 |
int __pyx_clineno = 0;
|
| 2139 |
__Pyx_RefNannySetupContext("voxelize_mesh_", 0);
|
| 2140 |
|
| 2141 |
-
/* "voxelize.pyx":
|
| 2142 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2143 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces):
|
| 2144 |
* assert(faces.shape[1] == 3) # <<<<<<<<<<<<<<
|
|
@@ -2149,12 +2149,12 @@ static int __pyx_f_8voxelize_voxelize_mesh_(__Pyx_memviewslice __pyx_v_occ, __Py
|
|
| 2149 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 2150 |
if (unlikely(!(((__pyx_v_faces.shape[1]) == 3) != 0))) {
|
| 2151 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2152 |
-
__PYX_ERR(0,
|
| 2153 |
}
|
| 2154 |
}
|
| 2155 |
#endif
|
| 2156 |
|
| 2157 |
-
/* "voxelize.pyx":
|
| 2158 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces):
|
| 2159 |
* assert(faces.shape[1] == 3)
|
| 2160 |
* assert(faces.shape[2] == 3) # <<<<<<<<<<<<<<
|
|
@@ -2165,12 +2165,12 @@ static int __pyx_f_8voxelize_voxelize_mesh_(__Pyx_memviewslice __pyx_v_occ, __Py
|
|
| 2165 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 2166 |
if (unlikely(!(((__pyx_v_faces.shape[2]) == 3) != 0))) {
|
| 2167 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2168 |
-
__PYX_ERR(0,
|
| 2169 |
}
|
| 2170 |
}
|
| 2171 |
#endif
|
| 2172 |
|
| 2173 |
-
/* "voxelize.pyx":
|
| 2174 |
* assert(faces.shape[2] == 3)
|
| 2175 |
*
|
| 2176 |
* n_faces = faces.shape[0] # <<<<<<<<<<<<<<
|
|
@@ -2179,7 +2179,7 @@ static int __pyx_f_8voxelize_voxelize_mesh_(__Pyx_memviewslice __pyx_v_occ, __Py
|
|
| 2179 |
*/
|
| 2180 |
__pyx_v_n_faces = (__pyx_v_faces.shape[0]);
|
| 2181 |
|
| 2182 |
-
/* "voxelize.pyx":
|
| 2183 |
* n_faces = faces.shape[0]
|
| 2184 |
* cdef int i
|
| 2185 |
* for i in range(n_faces): # <<<<<<<<<<<<<<
|
|
@@ -2191,7 +2191,7 @@ static int __pyx_f_8voxelize_voxelize_mesh_(__Pyx_memviewslice __pyx_v_occ, __Py
|
|
| 2191 |
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
|
| 2192 |
__pyx_v_i = __pyx_t_3;
|
| 2193 |
|
| 2194 |
-
/* "voxelize.pyx":
|
| 2195 |
* cdef int i
|
| 2196 |
* for i in range(n_faces):
|
| 2197 |
* voxelize_triangle_(occ, faces[i]) # <<<<<<<<<<<<<<
|
|
@@ -2221,7 +2221,7 @@ __pyx_t_4.strides[1] = __pyx_v_faces.strides[2];
|
|
| 2221 |
__pyx_t_4.data = NULL;
|
| 2222 |
}
|
| 2223 |
|
| 2224 |
-
/* "voxelize.pyx":
|
| 2225 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2226 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2227 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces): # <<<<<<<<<<<<<<
|
|
@@ -2275,11 +2275,11 @@ static PyObject *__pyx_pw_8voxelize_1voxelize_mesh_(PyObject *__pyx_self, PyObje
|
|
| 2275 |
case 1:
|
| 2276 |
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_faces)) != 0)) kw_args--;
|
| 2277 |
else {
|
| 2278 |
-
__Pyx_RaiseArgtupleInvalid("voxelize_mesh_", 1, 2, 2, 1); __PYX_ERR(0,
|
| 2279 |
}
|
| 2280 |
}
|
| 2281 |
if (unlikely(kw_args > 0)) {
|
| 2282 |
-
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "voxelize_mesh_") < 0)) __PYX_ERR(0,
|
| 2283 |
}
|
| 2284 |
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
|
| 2285 |
goto __pyx_L5_argtuple_error;
|
|
@@ -2287,12 +2287,12 @@ static PyObject *__pyx_pw_8voxelize_1voxelize_mesh_(PyObject *__pyx_self, PyObje
|
|
| 2287 |
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
|
| 2288 |
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
|
| 2289 |
}
|
| 2290 |
-
__pyx_v_occ = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_occ.memview)) __PYX_ERR(0,
|
| 2291 |
-
__pyx_v_faces = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_faces.memview)) __PYX_ERR(0,
|
| 2292 |
}
|
| 2293 |
goto __pyx_L4_argument_unpacking_done;
|
| 2294 |
__pyx_L5_argtuple_error:;
|
| 2295 |
-
__Pyx_RaiseArgtupleInvalid("voxelize_mesh_", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0,
|
| 2296 |
__pyx_L3_error:;
|
| 2297 |
__Pyx_AddTraceback("voxelize.voxelize_mesh_", __pyx_clineno, __pyx_lineno, __pyx_filename);
|
| 2298 |
__Pyx_RefNannyFinishContext();
|
|
@@ -2314,9 +2314,9 @@ static PyObject *__pyx_pf_8voxelize_voxelize_mesh_(CYTHON_UNUSED PyObject *__pyx
|
|
| 2314 |
int __pyx_clineno = 0;
|
| 2315 |
__Pyx_RefNannySetupContext("voxelize_mesh_", 0);
|
| 2316 |
__Pyx_XDECREF(__pyx_r);
|
| 2317 |
-
if (unlikely(!__pyx_v_occ.memview)) { __Pyx_RaiseUnboundLocalError("occ"); __PYX_ERR(0,
|
| 2318 |
-
if (unlikely(!__pyx_v_faces.memview)) { __Pyx_RaiseUnboundLocalError("faces"); __PYX_ERR(0,
|
| 2319 |
-
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_f_8voxelize_voxelize_mesh_(__pyx_v_occ, __pyx_v_faces, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0,
|
| 2320 |
__Pyx_GOTREF(__pyx_t_1);
|
| 2321 |
__pyx_r = __pyx_t_1;
|
| 2322 |
__pyx_t_1 = 0;
|
|
@@ -2335,7 +2335,7 @@ static PyObject *__pyx_pf_8voxelize_voxelize_mesh_(CYTHON_UNUSED PyObject *__pyx
|
|
| 2335 |
return __pyx_r;
|
| 2336 |
}
|
| 2337 |
|
| 2338 |
-
/* "voxelize.pyx":
|
| 2339 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2340 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2341 |
* cpdef int voxelize_triangle_(bint[:, :, :] occupancies, float[:, ::1] triverts): # <<<<<<<<<<<<<<
|
|
@@ -2382,7 +2382,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2382 |
Py_ssize_t __pyx_t_25;
|
| 2383 |
__Pyx_RefNannySetupContext("voxelize_triangle_", 0);
|
| 2384 |
|
| 2385 |
-
/* "voxelize.pyx":
|
| 2386 |
* cdef bint intersection
|
| 2387 |
*
|
| 2388 |
* boxhalfsize[:] = (0.5, 0.5, 0.5) # <<<<<<<<<<<<<<
|
|
@@ -2397,7 +2397,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2397 |
(__pyx_t_1[1]) = __pyx_t_3;
|
| 2398 |
(__pyx_t_1[2]) = __pyx_t_4;
|
| 2399 |
|
| 2400 |
-
/* "voxelize.pyx":
|
| 2401 |
* boxhalfsize[:] = (0.5, 0.5, 0.5)
|
| 2402 |
*
|
| 2403 |
* for i in range(3): # <<<<<<<<<<<<<<
|
|
@@ -2407,7 +2407,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2407 |
for (__pyx_t_5 = 0; __pyx_t_5 < 3; __pyx_t_5+=1) {
|
| 2408 |
__pyx_v_i = __pyx_t_5;
|
| 2409 |
|
| 2410 |
-
/* "voxelize.pyx":
|
| 2411 |
* for i in range(3):
|
| 2412 |
* bbox_min[i] = <int> (
|
| 2413 |
* min(triverts[0, i], triverts[1, i], triverts[2, i]) # <<<<<<<<<<<<<<
|
|
@@ -2435,7 +2435,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2435 |
__pyx_t_11 = __pyx_t_10;
|
| 2436 |
}
|
| 2437 |
|
| 2438 |
-
/* "voxelize.pyx":
|
| 2439 |
*
|
| 2440 |
* for i in range(3):
|
| 2441 |
* bbox_min[i] = <int> ( # <<<<<<<<<<<<<<
|
|
@@ -2444,7 +2444,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2444 |
*/
|
| 2445 |
(__pyx_v_bbox_min[__pyx_v_i]) = ((int)__pyx_t_11);
|
| 2446 |
|
| 2447 |
-
/* "voxelize.pyx":
|
| 2448 |
* min(triverts[0, i], triverts[1, i], triverts[2, i])
|
| 2449 |
* )
|
| 2450 |
* bbox_min[i] = min(max(bbox_min[i], 0), occupancies.shape[i] - 1) # <<<<<<<<<<<<<<
|
|
@@ -2468,7 +2468,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2468 |
(__pyx_v_bbox_min[__pyx_v_i]) = __pyx_t_16;
|
| 2469 |
}
|
| 2470 |
|
| 2471 |
-
/* "voxelize.pyx":
|
| 2472 |
* bbox_min[i] = min(max(bbox_min[i], 0), occupancies.shape[i] - 1)
|
| 2473 |
*
|
| 2474 |
* for i in range(3): # <<<<<<<<<<<<<<
|
|
@@ -2478,7 +2478,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2478 |
for (__pyx_t_5 = 0; __pyx_t_5 < 3; __pyx_t_5+=1) {
|
| 2479 |
__pyx_v_i = __pyx_t_5;
|
| 2480 |
|
| 2481 |
-
/* "voxelize.pyx":
|
| 2482 |
* for i in range(3):
|
| 2483 |
* bbox_max[i] = <int> (
|
| 2484 |
* max(triverts[0, i], triverts[1, i], triverts[2, i]) # <<<<<<<<<<<<<<
|
|
@@ -2506,7 +2506,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2506 |
__pyx_t_10 = __pyx_t_9;
|
| 2507 |
}
|
| 2508 |
|
| 2509 |
-
/* "voxelize.pyx":
|
| 2510 |
*
|
| 2511 |
* for i in range(3):
|
| 2512 |
* bbox_max[i] = <int> ( # <<<<<<<<<<<<<<
|
|
@@ -2515,7 +2515,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2515 |
*/
|
| 2516 |
(__pyx_v_bbox_max[__pyx_v_i]) = ((int)__pyx_t_10);
|
| 2517 |
|
| 2518 |
-
/* "voxelize.pyx":
|
| 2519 |
* max(triverts[0, i], triverts[1, i], triverts[2, i])
|
| 2520 |
* )
|
| 2521 |
* bbox_max[i] = min(max(bbox_max[i], 0), occupancies.shape[i] - 1) # <<<<<<<<<<<<<<
|
|
@@ -2539,7 +2539,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2539 |
(__pyx_v_bbox_max[__pyx_v_i]) = __pyx_t_12;
|
| 2540 |
}
|
| 2541 |
|
| 2542 |
-
/* "voxelize.pyx":
|
| 2543 |
* bbox_max[i] = min(max(bbox_max[i], 0), occupancies.shape[i] - 1)
|
| 2544 |
*
|
| 2545 |
* for i in range(bbox_min[0], bbox_max[0] + 1): # <<<<<<<<<<<<<<
|
|
@@ -2551,7 +2551,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2551 |
for (__pyx_t_5 = (__pyx_v_bbox_min[0]); __pyx_t_5 < __pyx_t_15; __pyx_t_5+=1) {
|
| 2552 |
__pyx_v_i = __pyx_t_5;
|
| 2553 |
|
| 2554 |
-
/* "voxelize.pyx":
|
| 2555 |
*
|
| 2556 |
* for i in range(bbox_min[0], bbox_max[0] + 1):
|
| 2557 |
* for j in range(bbox_min[1], bbox_max[1] + 1): # <<<<<<<<<<<<<<
|
|
@@ -2563,7 +2563,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2563 |
for (__pyx_t_14 = (__pyx_v_bbox_min[1]); __pyx_t_14 < __pyx_t_18; __pyx_t_14+=1) {
|
| 2564 |
__pyx_v_j = __pyx_t_14;
|
| 2565 |
|
| 2566 |
-
/* "voxelize.pyx":
|
| 2567 |
* for i in range(bbox_min[0], bbox_max[0] + 1):
|
| 2568 |
* for j in range(bbox_min[1], bbox_max[1] + 1):
|
| 2569 |
* for k in range(bbox_min[2], bbox_max[2] + 1): # <<<<<<<<<<<<<<
|
|
@@ -2575,7 +2575,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2575 |
for (__pyx_t_21 = (__pyx_v_bbox_min[2]); __pyx_t_21 < __pyx_t_20; __pyx_t_21+=1) {
|
| 2576 |
__pyx_v_k = __pyx_t_21;
|
| 2577 |
|
| 2578 |
-
/* "voxelize.pyx":
|
| 2579 |
* for j in range(bbox_min[1], bbox_max[1] + 1):
|
| 2580 |
* for k in range(bbox_min[2], bbox_max[2] + 1):
|
| 2581 |
* boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5) # <<<<<<<<<<<<<<
|
|
@@ -2590,7 +2590,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2590 |
(__pyx_t_1[1]) = __pyx_t_3;
|
| 2591 |
(__pyx_t_1[2]) = __pyx_t_2;
|
| 2592 |
|
| 2593 |
-
/* "voxelize.pyx":
|
| 2594 |
* boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5)
|
| 2595 |
* intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
|
| 2596 |
* &triverts[0, 0], &triverts[1, 0], &triverts[2, 0]) # <<<<<<<<<<<<<<
|
|
@@ -2604,7 +2604,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2604 |
__pyx_t_24 = 2;
|
| 2605 |
__pyx_t_25 = 0;
|
| 2606 |
|
| 2607 |
-
/* "voxelize.pyx":
|
| 2608 |
* for k in range(bbox_min[2], bbox_max[2] + 1):
|
| 2609 |
* boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5)
|
| 2610 |
* intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], # <<<<<<<<<<<<<<
|
|
@@ -2613,7 +2613,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2613 |
*/
|
| 2614 |
__pyx_v_intersection = triBoxOverlap((&(__pyx_v_boxcenter[0])), (&(__pyx_v_boxhalfsize[0])), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_6 * __pyx_v_triverts.strides[0]) )) + __pyx_t_7)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_22 * __pyx_v_triverts.strides[0]) )) + __pyx_t_23)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_24 * __pyx_v_triverts.strides[0]) )) + __pyx_t_25)) )))));
|
| 2615 |
|
| 2616 |
-
/* "voxelize.pyx":
|
| 2617 |
* intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
|
| 2618 |
* &triverts[0, 0], &triverts[1, 0], &triverts[2, 0])
|
| 2619 |
* occupancies[i, j, k] |= intersection # <<<<<<<<<<<<<<
|
|
@@ -2628,7 +2628,7 @@ static int __pyx_f_8voxelize_voxelize_triangle_(__Pyx_memviewslice __pyx_v_occup
|
|
| 2628 |
}
|
| 2629 |
}
|
| 2630 |
|
| 2631 |
-
/* "voxelize.pyx":
|
| 2632 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2633 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2634 |
* cpdef int voxelize_triangle_(bint[:, :, :] occupancies, float[:, ::1] triverts): # <<<<<<<<<<<<<<
|
|
@@ -2676,11 +2676,11 @@ static PyObject *__pyx_pw_8voxelize_3voxelize_triangle_(PyObject *__pyx_self, Py
|
|
| 2676 |
case 1:
|
| 2677 |
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_triverts)) != 0)) kw_args--;
|
| 2678 |
else {
|
| 2679 |
-
__Pyx_RaiseArgtupleInvalid("voxelize_triangle_", 1, 2, 2, 1); __PYX_ERR(0,
|
| 2680 |
}
|
| 2681 |
}
|
| 2682 |
if (unlikely(kw_args > 0)) {
|
| 2683 |
-
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "voxelize_triangle_") < 0)) __PYX_ERR(0,
|
| 2684 |
}
|
| 2685 |
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
|
| 2686 |
goto __pyx_L5_argtuple_error;
|
|
@@ -2688,12 +2688,12 @@ static PyObject *__pyx_pw_8voxelize_3voxelize_triangle_(PyObject *__pyx_self, Py
|
|
| 2688 |
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
|
| 2689 |
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
|
| 2690 |
}
|
| 2691 |
-
__pyx_v_occupancies = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_occupancies.memview)) __PYX_ERR(0,
|
| 2692 |
-
__pyx_v_triverts = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_triverts.memview)) __PYX_ERR(0,
|
| 2693 |
}
|
| 2694 |
goto __pyx_L4_argument_unpacking_done;
|
| 2695 |
__pyx_L5_argtuple_error:;
|
| 2696 |
-
__Pyx_RaiseArgtupleInvalid("voxelize_triangle_", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0,
|
| 2697 |
__pyx_L3_error:;
|
| 2698 |
__Pyx_AddTraceback("voxelize.voxelize_triangle_", __pyx_clineno, __pyx_lineno, __pyx_filename);
|
| 2699 |
__Pyx_RefNannyFinishContext();
|
|
@@ -2715,9 +2715,9 @@ static PyObject *__pyx_pf_8voxelize_2voxelize_triangle_(CYTHON_UNUSED PyObject *
|
|
| 2715 |
int __pyx_clineno = 0;
|
| 2716 |
__Pyx_RefNannySetupContext("voxelize_triangle_", 0);
|
| 2717 |
__Pyx_XDECREF(__pyx_r);
|
| 2718 |
-
if (unlikely(!__pyx_v_occupancies.memview)) { __Pyx_RaiseUnboundLocalError("occupancies"); __PYX_ERR(0,
|
| 2719 |
-
if (unlikely(!__pyx_v_triverts.memview)) { __Pyx_RaiseUnboundLocalError("triverts"); __PYX_ERR(0,
|
| 2720 |
-
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_f_8voxelize_voxelize_triangle_(__pyx_v_occupancies, __pyx_v_triverts, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0,
|
| 2721 |
__Pyx_GOTREF(__pyx_t_1);
|
| 2722 |
__pyx_r = __pyx_t_1;
|
| 2723 |
__pyx_t_1 = 0;
|
|
@@ -2736,7 +2736,7 @@ static PyObject *__pyx_pf_8voxelize_2voxelize_triangle_(CYTHON_UNUSED PyObject *
|
|
| 2736 |
return __pyx_r;
|
| 2737 |
}
|
| 2738 |
|
| 2739 |
-
/* "voxelize.pyx":
|
| 2740 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2741 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2742 |
* cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts): # <<<<<<<<<<<<<<
|
|
@@ -2762,7 +2762,7 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2762 |
int __pyx_clineno = 0;
|
| 2763 |
__Pyx_RefNannySetupContext("test_triangle_aabb", 0);
|
| 2764 |
|
| 2765 |
-
/* "voxelize.pyx":
|
| 2766 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2767 |
* cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts):
|
| 2768 |
* assert(boxcenter.shape[0] == 3) # <<<<<<<<<<<<<<
|
|
@@ -2773,12 +2773,12 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2773 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 2774 |
if (unlikely(!(((__pyx_v_boxcenter.shape[0]) == 3) != 0))) {
|
| 2775 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2776 |
-
__PYX_ERR(0,
|
| 2777 |
}
|
| 2778 |
}
|
| 2779 |
#endif
|
| 2780 |
|
| 2781 |
-
/* "voxelize.pyx":
|
| 2782 |
* cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts):
|
| 2783 |
* assert(boxcenter.shape[0] == 3)
|
| 2784 |
* assert(boxhalfsize.shape[0] == 3) # <<<<<<<<<<<<<<
|
|
@@ -2789,12 +2789,12 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2789 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 2790 |
if (unlikely(!(((__pyx_v_boxhalfsize.shape[0]) == 3) != 0))) {
|
| 2791 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2792 |
-
__PYX_ERR(0,
|
| 2793 |
}
|
| 2794 |
}
|
| 2795 |
#endif
|
| 2796 |
|
| 2797 |
-
/* "voxelize.pyx":
|
| 2798 |
* assert(boxcenter.shape[0] == 3)
|
| 2799 |
* assert(boxhalfsize.shape[0] == 3)
|
| 2800 |
* assert(triverts.shape[0] == triverts.shape[1] == 3) # <<<<<<<<<<<<<<
|
|
@@ -2809,12 +2809,12 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2809 |
}
|
| 2810 |
if (unlikely(!(__pyx_t_1 != 0))) {
|
| 2811 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2812 |
-
__PYX_ERR(0,
|
| 2813 |
}
|
| 2814 |
}
|
| 2815 |
#endif
|
| 2816 |
|
| 2817 |
-
/* "voxelize.pyx":
|
| 2818 |
* # print(triverts)
|
| 2819 |
* # Call functions
|
| 2820 |
* cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], # <<<<<<<<<<<<<<
|
|
@@ -2824,7 +2824,7 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2824 |
__pyx_t_2 = 0;
|
| 2825 |
__pyx_t_3 = 0;
|
| 2826 |
|
| 2827 |
-
/* "voxelize.pyx":
|
| 2828 |
* # Call functions
|
| 2829 |
* cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
|
| 2830 |
* &triverts[0, 0], &triverts[1, 0], &triverts[2, 0]) # <<<<<<<<<<<<<<
|
|
@@ -2837,7 +2837,7 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2837 |
__pyx_t_8 = 2;
|
| 2838 |
__pyx_t_9 = 0;
|
| 2839 |
|
| 2840 |
-
/* "voxelize.pyx":
|
| 2841 |
* # print(triverts)
|
| 2842 |
* # Call functions
|
| 2843 |
* cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], # <<<<<<<<<<<<<<
|
|
@@ -2846,7 +2846,7 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2846 |
*/
|
| 2847 |
__pyx_v_result = triBoxOverlap((&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_boxcenter.data) + __pyx_t_2)) )))), (&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_boxhalfsize.data) + __pyx_t_3)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_4 * __pyx_v_triverts.strides[0]) )) + __pyx_t_5)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_6 * __pyx_v_triverts.strides[0]) )) + __pyx_t_7)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_8 * __pyx_v_triverts.strides[0]) )) + __pyx_t_9)) )))));
|
| 2848 |
|
| 2849 |
-
/* "voxelize.pyx":
|
| 2850 |
* cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
|
| 2851 |
* &triverts[0, 0], &triverts[1, 0], &triverts[2, 0])
|
| 2852 |
* return result # <<<<<<<<<<<<<<
|
|
@@ -2854,7 +2854,7 @@ static int __pyx_f_8voxelize_test_triangle_aabb(__Pyx_memviewslice __pyx_v_boxce
|
|
| 2854 |
__pyx_r = __pyx_v_result;
|
| 2855 |
goto __pyx_L0;
|
| 2856 |
|
| 2857 |
-
/* "voxelize.pyx":
|
| 2858 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2859 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2860 |
* cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts): # <<<<<<<<<<<<<<
|
|
@@ -16757,7 +16757,7 @@ static __Pyx_StringTabEntry __pyx_string_tab[] = {
|
|
| 16757 |
{0, 0, 0, 0, 0, 0, 0}
|
| 16758 |
};
|
| 16759 |
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
|
| 16760 |
-
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0,
|
| 16761 |
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
|
| 16762 |
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
|
| 16763 |
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
|
|
@@ -17377,8 +17377,8 @@ if (!__Pyx_RefNanny) {
|
|
| 17377 |
|
| 17378 |
/* "voxelize.pyx":1
|
| 17379 |
* cimport cython # <<<<<<<<<<<<<<
|
| 17380 |
-
* from libc.math cimport floor, ceil
|
| 17381 |
* from cython.view cimport array as cvarray
|
|
|
|
| 17382 |
*/
|
| 17383 |
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
|
| 17384 |
__Pyx_GOTREF(__pyx_t_1);
|
|
|
|
| 2115 |
static PyObject *__pyx_codeobj__25;
|
| 2116 |
/* Late includes */
|
| 2117 |
|
| 2118 |
+
/* "voxelize.pyx":13
|
| 2119 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2120 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2121 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces): # <<<<<<<<<<<<<<
|
|
|
|
| 2138 |
int __pyx_clineno = 0;
|
| 2139 |
__Pyx_RefNannySetupContext("voxelize_mesh_", 0);
|
| 2140 |
|
| 2141 |
+
/* "voxelize.pyx":14
|
| 2142 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2143 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces):
|
| 2144 |
* assert(faces.shape[1] == 3) # <<<<<<<<<<<<<<
|
|
|
|
| 2149 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 2150 |
if (unlikely(!(((__pyx_v_faces.shape[1]) == 3) != 0))) {
|
| 2151 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2152 |
+
__PYX_ERR(0, 14, __pyx_L1_error)
|
| 2153 |
}
|
| 2154 |
}
|
| 2155 |
#endif
|
| 2156 |
|
| 2157 |
+
/* "voxelize.pyx":15
|
| 2158 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces):
|
| 2159 |
* assert(faces.shape[1] == 3)
|
| 2160 |
* assert(faces.shape[2] == 3) # <<<<<<<<<<<<<<
|
|
|
|
| 2165 |
if (unlikely(!Py_OptimizeFlag)) {
|
| 2166 |
if (unlikely(!(((__pyx_v_faces.shape[2]) == 3) != 0))) {
|
| 2167 |
PyErr_SetNone(PyExc_AssertionError);
|
| 2168 |
+
__PYX_ERR(0, 15, __pyx_L1_error)
|
| 2169 |
}
|
| 2170 |
}
|
| 2171 |
#endif
|
| 2172 |
|
| 2173 |
+
/* "voxelize.pyx":17
|
| 2174 |
* assert(faces.shape[2] == 3)
|
| 2175 |
*
|
| 2176 |
* n_faces = faces.shape[0] # <<<<<<<<<<<<<<
|
|
|
|
| 2179 |
*/
|
| 2180 |
__pyx_v_n_faces = (__pyx_v_faces.shape[0]);
|
| 2181 |
|
| 2182 |
+
/* "voxelize.pyx":19
|
| 2183 |
* n_faces = faces.shape[0]
|
| 2184 |
* cdef int i
|
| 2185 |
* for i in range(n_faces): # <<<<<<<<<<<<<<
|
|
|
|
| 2191 |
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
|
| 2192 |
__pyx_v_i = __pyx_t_3;
|
| 2193 |
|
| 2194 |
+
/* "voxelize.pyx":20
|
| 2195 |
* cdef int i
|
| 2196 |
* for i in range(n_faces):
|
| 2197 |
* voxelize_triangle_(occ, faces[i]) # <<<<<<<<<<<<<<
|
|
|
|
| 2221 |
__pyx_t_4.data = NULL;
|
| 2222 |
}
|
| 2223 |
|
| 2224 |
+
/* "voxelize.pyx":13
|
| 2225 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2226 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2227 |
* cpdef int voxelize_mesh_(bint[:, :, :] occ, float[:, :, ::1] faces): # <<<<<<<<<<<<<<
|
|
|
|
| 2275 |
case 1:
|
| 2276 |
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_faces)) != 0)) kw_args--;
|
| 2277 |
else {
|
| 2278 |
+
__Pyx_RaiseArgtupleInvalid("voxelize_mesh_", 1, 2, 2, 1); __PYX_ERR(0, 13, __pyx_L3_error)
|
| 2279 |
}
|
| 2280 |
}
|
| 2281 |
if (unlikely(kw_args > 0)) {
|
| 2282 |
+
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "voxelize_mesh_") < 0)) __PYX_ERR(0, 13, __pyx_L3_error)
|
| 2283 |
}
|
| 2284 |
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
|
| 2285 |
goto __pyx_L5_argtuple_error;
|
|
|
|
| 2287 |
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
|
| 2288 |
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
|
| 2289 |
}
|
| 2290 |
+
__pyx_v_occ = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_occ.memview)) __PYX_ERR(0, 13, __pyx_L3_error)
|
| 2291 |
+
__pyx_v_faces = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_faces.memview)) __PYX_ERR(0, 13, __pyx_L3_error)
|
| 2292 |
}
|
| 2293 |
goto __pyx_L4_argument_unpacking_done;
|
| 2294 |
__pyx_L5_argtuple_error:;
|
| 2295 |
+
__Pyx_RaiseArgtupleInvalid("voxelize_mesh_", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 13, __pyx_L3_error)
|
| 2296 |
__pyx_L3_error:;
|
| 2297 |
__Pyx_AddTraceback("voxelize.voxelize_mesh_", __pyx_clineno, __pyx_lineno, __pyx_filename);
|
| 2298 |
__Pyx_RefNannyFinishContext();
|
|
|
|
| 2314 |
int __pyx_clineno = 0;
|
| 2315 |
__Pyx_RefNannySetupContext("voxelize_mesh_", 0);
|
| 2316 |
__Pyx_XDECREF(__pyx_r);
|
| 2317 |
+
if (unlikely(!__pyx_v_occ.memview)) { __Pyx_RaiseUnboundLocalError("occ"); __PYX_ERR(0, 13, __pyx_L1_error) }
|
| 2318 |
+
if (unlikely(!__pyx_v_faces.memview)) { __Pyx_RaiseUnboundLocalError("faces"); __PYX_ERR(0, 13, __pyx_L1_error) }
|
| 2319 |
+
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_f_8voxelize_voxelize_mesh_(__pyx_v_occ, __pyx_v_faces, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error)
|
| 2320 |
__Pyx_GOTREF(__pyx_t_1);
|
| 2321 |
__pyx_r = __pyx_t_1;
|
| 2322 |
__pyx_t_1 = 0;
|
|
|
|
| 2335 |
return __pyx_r;
|
| 2336 |
}
|
| 2337 |
|
| 2338 |
+
/* "voxelize.pyx":25
|
| 2339 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2340 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2341 |
* cpdef int voxelize_triangle_(bint[:, :, :] occupancies, float[:, ::1] triverts): # <<<<<<<<<<<<<<
|
|
|
|
| 2382 |
Py_ssize_t __pyx_t_25;
|
| 2383 |
__Pyx_RefNannySetupContext("voxelize_triangle_", 0);
|
| 2384 |
|
| 2385 |
+
/* "voxelize.pyx":33
|
| 2386 |
* cdef bint intersection
|
| 2387 |
*
|
| 2388 |
* boxhalfsize[:] = (0.5, 0.5, 0.5) # <<<<<<<<<<<<<<
|
|
|
|
| 2397 |
(__pyx_t_1[1]) = __pyx_t_3;
|
| 2398 |
(__pyx_t_1[2]) = __pyx_t_4;
|
| 2399 |
|
| 2400 |
+
/* "voxelize.pyx":35
|
| 2401 |
* boxhalfsize[:] = (0.5, 0.5, 0.5)
|
| 2402 |
*
|
| 2403 |
* for i in range(3): # <<<<<<<<<<<<<<
|
|
|
|
| 2407 |
for (__pyx_t_5 = 0; __pyx_t_5 < 3; __pyx_t_5+=1) {
|
| 2408 |
__pyx_v_i = __pyx_t_5;
|
| 2409 |
|
| 2410 |
+
/* "voxelize.pyx":37
|
| 2411 |
* for i in range(3):
|
| 2412 |
* bbox_min[i] = <int> (
|
| 2413 |
* min(triverts[0, i], triverts[1, i], triverts[2, i]) # <<<<<<<<<<<<<<
|
|
|
|
| 2435 |
__pyx_t_11 = __pyx_t_10;
|
| 2436 |
}
|
| 2437 |
|
| 2438 |
+
/* "voxelize.pyx":36
|
| 2439 |
*
|
| 2440 |
* for i in range(3):
|
| 2441 |
* bbox_min[i] = <int> ( # <<<<<<<<<<<<<<
|
|
|
|
| 2444 |
*/
|
| 2445 |
(__pyx_v_bbox_min[__pyx_v_i]) = ((int)__pyx_t_11);
|
| 2446 |
|
| 2447 |
+
/* "voxelize.pyx":39
|
| 2448 |
* min(triverts[0, i], triverts[1, i], triverts[2, i])
|
| 2449 |
* )
|
| 2450 |
* bbox_min[i] = min(max(bbox_min[i], 0), occupancies.shape[i] - 1) # <<<<<<<<<<<<<<
|
|
|
|
| 2468 |
(__pyx_v_bbox_min[__pyx_v_i]) = __pyx_t_16;
|
| 2469 |
}
|
| 2470 |
|
| 2471 |
+
/* "voxelize.pyx":41
|
| 2472 |
* bbox_min[i] = min(max(bbox_min[i], 0), occupancies.shape[i] - 1)
|
| 2473 |
*
|
| 2474 |
* for i in range(3): # <<<<<<<<<<<<<<
|
|
|
|
| 2478 |
for (__pyx_t_5 = 0; __pyx_t_5 < 3; __pyx_t_5+=1) {
|
| 2479 |
__pyx_v_i = __pyx_t_5;
|
| 2480 |
|
| 2481 |
+
/* "voxelize.pyx":43
|
| 2482 |
* for i in range(3):
|
| 2483 |
* bbox_max[i] = <int> (
|
| 2484 |
* max(triverts[0, i], triverts[1, i], triverts[2, i]) # <<<<<<<<<<<<<<
|
|
|
|
| 2506 |
__pyx_t_10 = __pyx_t_9;
|
| 2507 |
}
|
| 2508 |
|
| 2509 |
+
/* "voxelize.pyx":42
|
| 2510 |
*
|
| 2511 |
* for i in range(3):
|
| 2512 |
* bbox_max[i] = <int> ( # <<<<<<<<<<<<<<
|
|
|
|
| 2515 |
*/
|
| 2516 |
(__pyx_v_bbox_max[__pyx_v_i]) = ((int)__pyx_t_10);
|
| 2517 |
|
| 2518 |
+
/* "voxelize.pyx":45
|
| 2519 |
* max(triverts[0, i], triverts[1, i], triverts[2, i])
|
| 2520 |
* )
|
| 2521 |
* bbox_max[i] = min(max(bbox_max[i], 0), occupancies.shape[i] - 1) # <<<<<<<<<<<<<<
|
|
|
|
| 2539 |
(__pyx_v_bbox_max[__pyx_v_i]) = __pyx_t_12;
|
| 2540 |
}
|
| 2541 |
|
| 2542 |
+
/* "voxelize.pyx":47
|
| 2543 |
* bbox_max[i] = min(max(bbox_max[i], 0), occupancies.shape[i] - 1)
|
| 2544 |
*
|
| 2545 |
* for i in range(bbox_min[0], bbox_max[0] + 1): # <<<<<<<<<<<<<<
|
|
|
|
| 2551 |
for (__pyx_t_5 = (__pyx_v_bbox_min[0]); __pyx_t_5 < __pyx_t_15; __pyx_t_5+=1) {
|
| 2552 |
__pyx_v_i = __pyx_t_5;
|
| 2553 |
|
| 2554 |
+
/* "voxelize.pyx":48
|
| 2555 |
*
|
| 2556 |
* for i in range(bbox_min[0], bbox_max[0] + 1):
|
| 2557 |
* for j in range(bbox_min[1], bbox_max[1] + 1): # <<<<<<<<<<<<<<
|
|
|
|
| 2563 |
for (__pyx_t_14 = (__pyx_v_bbox_min[1]); __pyx_t_14 < __pyx_t_18; __pyx_t_14+=1) {
|
| 2564 |
__pyx_v_j = __pyx_t_14;
|
| 2565 |
|
| 2566 |
+
/* "voxelize.pyx":49
|
| 2567 |
* for i in range(bbox_min[0], bbox_max[0] + 1):
|
| 2568 |
* for j in range(bbox_min[1], bbox_max[1] + 1):
|
| 2569 |
* for k in range(bbox_min[2], bbox_max[2] + 1): # <<<<<<<<<<<<<<
|
|
|
|
| 2575 |
for (__pyx_t_21 = (__pyx_v_bbox_min[2]); __pyx_t_21 < __pyx_t_20; __pyx_t_21+=1) {
|
| 2576 |
__pyx_v_k = __pyx_t_21;
|
| 2577 |
|
| 2578 |
+
/* "voxelize.pyx":50
|
| 2579 |
* for j in range(bbox_min[1], bbox_max[1] + 1):
|
| 2580 |
* for k in range(bbox_min[2], bbox_max[2] + 1):
|
| 2581 |
* boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5) # <<<<<<<<<<<<<<
|
|
|
|
| 2590 |
(__pyx_t_1[1]) = __pyx_t_3;
|
| 2591 |
(__pyx_t_1[2]) = __pyx_t_2;
|
| 2592 |
|
| 2593 |
+
/* "voxelize.pyx":52
|
| 2594 |
* boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5)
|
| 2595 |
* intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
|
| 2596 |
* &triverts[0, 0], &triverts[1, 0], &triverts[2, 0]) # <<<<<<<<<<<<<<
|
|
|
|
| 2604 |
__pyx_t_24 = 2;
|
| 2605 |
__pyx_t_25 = 0;
|
| 2606 |
|
| 2607 |
+
/* "voxelize.pyx":51
|
| 2608 |
* for k in range(bbox_min[2], bbox_max[2] + 1):
|
| 2609 |
* boxcenter[:] = (i + 0.5, j + 0.5, k + 0.5)
|
| 2610 |
* intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], # <<<<<<<<<<<<<<
|
|
|
|
| 2613 |
*/
|
| 2614 |
__pyx_v_intersection = triBoxOverlap((&(__pyx_v_boxcenter[0])), (&(__pyx_v_boxhalfsize[0])), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_6 * __pyx_v_triverts.strides[0]) )) + __pyx_t_7)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_22 * __pyx_v_triverts.strides[0]) )) + __pyx_t_23)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_24 * __pyx_v_triverts.strides[0]) )) + __pyx_t_25)) )))));
|
| 2615 |
|
| 2616 |
+
/* "voxelize.pyx":53
|
| 2617 |
* intersection = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
|
| 2618 |
* &triverts[0, 0], &triverts[1, 0], &triverts[2, 0])
|
| 2619 |
* occupancies[i, j, k] |= intersection # <<<<<<<<<<<<<<
|
|
|
|
| 2628 |
}
|
| 2629 |
}
|
| 2630 |
|
| 2631 |
+
/* "voxelize.pyx":25
|
| 2632 |
* @cython.boundscheck(False) # Deactivate bounds checking
|
| 2633 |
* @cython.wraparound(False) # Deactivate negative indexing.
|
| 2634 |
* cpdef int voxelize_triangle_(bint[:, :, :] occupancies, float[:, ::1] triverts): # <<<<<<<<<<<<<<
|
|
|
|
| 2676 |
case 1:
|
| 2677 |
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_triverts)) != 0)) kw_args--;
|
| 2678 |
else {
|
| 2679 |
+
__Pyx_RaiseArgtupleInvalid("voxelize_triangle_", 1, 2, 2, 1); __PYX_ERR(0, 25, __pyx_L3_error)
|
| 2680 |
}
|
| 2681 |
}
|
| 2682 |
if (unlikely(kw_args > 0)) {
|
| 2683 |
+
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "voxelize_triangle_") < 0)) __PYX_ERR(0, 25, __pyx_L3_error)
|
| 2684 |
}
|
| 2685 |
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
|
| 2686 |
goto __pyx_L5_argtuple_error;
|
|
|
|
| 2688 |
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
|
| 2689 |
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
|
| 2690 |
}
|
| 2691 |
+
@@ 2692 @@
   __pyx_v_occupancies = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_occupancies.memview)) __PYX_ERR(0, 25, __pyx_L3_error)
+  __pyx_v_triverts = __Pyx_PyObject_to_MemoryviewSlice_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_triverts.memview)) __PYX_ERR(0, 25, __pyx_L3_error)
   }
   goto __pyx_L4_argument_unpacking_done;
   __pyx_L5_argtuple_error:;
+  __Pyx_RaiseArgtupleInvalid("voxelize_triangle_", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 25, __pyx_L3_error)
   __pyx_L3_error:;
   __Pyx_AddTraceback("voxelize.voxelize_triangle_", __pyx_clineno, __pyx_lineno, __pyx_filename);
   __Pyx_RefNannyFinishContext();
@@ 2715 @@
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("voxelize_triangle_", 0);
   __Pyx_XDECREF(__pyx_r);
+  if (unlikely(!__pyx_v_occupancies.memview)) { __Pyx_RaiseUnboundLocalError("occupancies"); __PYX_ERR(0, 25, __pyx_L1_error) }
+  if (unlikely(!__pyx_v_triverts.memview)) { __Pyx_RaiseUnboundLocalError("triverts"); __PYX_ERR(0, 25, __pyx_L1_error) }
+  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_f_8voxelize_voxelize_triangle_(__pyx_v_occupancies, __pyx_v_triverts, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
   __pyx_r = __pyx_t_1;
   __pyx_t_1 = 0;
@@ 2736 @@
   return __pyx_r;
 }
 
+/* "voxelize.pyx":58
  * @cython.boundscheck(False) # Deactivate bounds checking
  * @cython.wraparound(False) # Deactivate negative indexing.
  * cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts): # <<<<<<<<<<<<<<
@@ 2762 @@
   int __pyx_clineno = 0;
   __Pyx_RefNannySetupContext("test_triangle_aabb", 0);
 
+  /* "voxelize.pyx":59
  * @cython.wraparound(False) # Deactivate negative indexing.
  * cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts):
  * assert(boxcenter.shape[0] == 3) # <<<<<<<<<<<<<<
@@ 2773 @@
   if (unlikely(!Py_OptimizeFlag)) {
     if (unlikely(!(((__pyx_v_boxcenter.shape[0]) == 3) != 0))) {
       PyErr_SetNone(PyExc_AssertionError);
+      __PYX_ERR(0, 59, __pyx_L1_error)
     }
   }
   #endif
 
+  /* "voxelize.pyx":60
  * cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts):
  * assert(boxcenter.shape[0] == 3)
  * assert(boxhalfsize.shape[0] == 3) # <<<<<<<<<<<<<<
@@ 2789 @@
   if (unlikely(!Py_OptimizeFlag)) {
     if (unlikely(!(((__pyx_v_boxhalfsize.shape[0]) == 3) != 0))) {
       PyErr_SetNone(PyExc_AssertionError);
+      __PYX_ERR(0, 60, __pyx_L1_error)
     }
   }
   #endif
 
+  /* "voxelize.pyx":61
  * assert(boxcenter.shape[0] == 3)
  * assert(boxhalfsize.shape[0] == 3)
  * assert(triverts.shape[0] == triverts.shape[1] == 3) # <<<<<<<<<<<<<<
@@ 2809 @@
   }
   if (unlikely(!(__pyx_t_1 != 0))) {
     PyErr_SetNone(PyExc_AssertionError);
+    __PYX_ERR(0, 61, __pyx_L1_error)
   }
   }
   #endif
 
+  /* "voxelize.pyx":65
  * # print(triverts)
  * # Call functions
  * cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], # <<<<<<<<<<<<<<
@@ 2824 @@
   __pyx_t_2 = 0;
   __pyx_t_3 = 0;
 
+  /* "voxelize.pyx":66
  * # Call functions
  * cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
  * &triverts[0, 0], &triverts[1, 0], &triverts[2, 0]) # <<<<<<<<<<<<<<
@@ 2837 @@
   __pyx_t_8 = 2;
   __pyx_t_9 = 0;
 
+  /* "voxelize.pyx":65
  * # print(triverts)
  * # Call functions
  * cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0], # <<<<<<<<<<<<<<
  */
   __pyx_v_result = triBoxOverlap((&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_boxcenter.data) + __pyx_t_2)) )))), (&(*((float *) ( /* dim=0 */ ((char *) (((float *) __pyx_v_boxhalfsize.data) + __pyx_t_3)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_4 * __pyx_v_triverts.strides[0]) )) + __pyx_t_5)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_6 * __pyx_v_triverts.strides[0]) )) + __pyx_t_7)) )))), (&(*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_triverts.data + __pyx_t_8 * __pyx_v_triverts.strides[0]) )) + __pyx_t_9)) )))));
 
+  /* "voxelize.pyx":67
  * cdef int result = triBoxOverlap(&boxcenter[0], &boxhalfsize[0],
  * &triverts[0, 0], &triverts[1, 0], &triverts[2, 0])
  * return result # <<<<<<<<<<<<<<
@@ 2854 @@
   __pyx_r = __pyx_v_result;
   goto __pyx_L0;
 
+  /* "voxelize.pyx":58
  * @cython.boundscheck(False) # Deactivate bounds checking
  * @cython.wraparound(False) # Deactivate negative indexing.
  * cdef int test_triangle_aabb(float[::1] boxcenter, float[::1] boxhalfsize, float[:, ::1] triverts): # <<<<<<<<<<<<<<
@@ 16757 @@
   {0, 0, 0, 0, 0, 0, 0}
 };
 static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 19, __pyx_L1_error)
   __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
   __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
   __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
@@ 17377 @@
 
 /* "voxelize.pyx":1
  * cimport cython # <<<<<<<<<<<<<<
  * from cython.view cimport array as cvarray
+ * from libc.math cimport ceil, floor
  */
   __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
   __Pyx_GOTREF(__pyx_t_1);
lib/common/libvoxelize/voxelize.pyx
CHANGED
@@ -1,6 +1,7 @@
 cimport cython
-from libc.math cimport floor, ceil
 from cython.view cimport array as cvarray
+from libc.math cimport ceil, floor
+
 
 cdef extern from "tribox2.h":
     int triBoxOverlap(float boxcenter[3], float boxhalfsize[3],
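The regenerated voxelize.c hunks above are simply Cython's re-emitted wrapper code for this one-line import reorder; the extension's behavior does not change. As a hedged, minimal sketch of driving the compiled module: the two-argument voxelize_triangle_ wrapper is visible in the generated argument-unpacking code (a writable 3-D int memoryview plus a 2-D float triangle), but the grid-unit convention for triverts is an assumption here, not something the diff states.

# A minimal sketch, assuming the extension built from lib/common/libvoxelize
# is importable and that triverts is expressed in voxel-grid units.
import numpy as np
from lib.common.libvoxelize.voxelize import voxelize_triangle_

occ = np.zeros((32, 32, 32), dtype=np.int32)    # writable occupancy volume
tri = np.array([[4.0, 4.0, 4.0],
                [20.0, 6.0, 5.0],
                [8.0, 24.0, 10.0]], dtype=np.float32)    # one triangle
voxelize_triangle_(occ, tri)    # marks voxels overlapping the triangle
print(occ.sum(), "voxels marked")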
lib/common/local_affine.py
CHANGED
@@ -5,13 +5,14 @@
 # file that should have been included as part of this package.
 
 import torch
-import trimesh
 import torch.nn as nn
-
-from pytorch3d.structures import Meshes
+import trimesh
 from pytorch3d.loss import chamfer_distance
-from lib.dataset.mesh_util import update_mesh_shape_prior_losses
+from pytorch3d.structures import Meshes
+from tqdm import tqdm
+
 from lib.common.train_util import init_loss
+from lib.dataset.mesh_util import update_mesh_shape_prior_losses
 
 
 # reference: https://github.com/wuhaozhe/pytorch-nicp
@@ -84,11 +85,9 @@ def register(target_mesh, src_mesh, device, verbose=True):
         src_mesh.verts_padded().shape[0], src_mesh.edges_packed()
     ).to(device)
 
-    optimizer_cloth = torch.optim.Adam(
-        [{
-            'params': local_affine_model.parameters()
-        }], lr=1e-2, amsgrad=True
-    )
+    optimizer_cloth = torch.optim.Adam([{'params': local_affine_model.parameters()}],
+                                       lr=1e-2,
+                                       amsgrad=True)
     scheduler_cloth = torch.optim.lr_scheduler.ReduceLROnPlateau(
         optimizer_cloth,
         mode="min",
@@ -104,7 +103,7 @@ def register(target_mesh, src_mesh, device, verbose=True):
         loop_cloth = tqdm(range(100))
     else:
         loop_cloth = range(100)
-
+
     for i in loop_cloth:
 
         optimizer_cloth.zero_grad()
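The optimizer hunk is purely cosmetic, but the pattern it reflows is worth a note: Adam is built over a single parameter group holding the local affine model's parameters, with AMSGrad enabled, and paired with the ReduceLROnPlateau scheduler that follows it. A self-contained sketch; the tiny Linear stands in for local_affine_model, and the scheduler's factor/patience values are illustrative, not taken from the file:

# A minimal sketch of the optimizer/scheduler pairing reformatted above.
import torch
import torch.nn as nn

local_affine_model = nn.Linear(3, 3)    # placeholder for the per-vertex affine model
optimizer_cloth = torch.optim.Adam([{'params': local_affine_model.parameters()}],
                                   lr=1e-2,
                                   amsgrad=True)
scheduler_cloth = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer_cloth, mode="min", factor=0.1, patience=5    # factor/patience assumed
)
loss = local_affine_model(torch.randn(8, 3)).pow(2).mean()
loss.backward()
optimizer_cloth.step()                  # one optimization step
scheduler_cloth.step(loss.item())       # scheduler watches the metric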
lib/common/render.py
CHANGED
@@ -14,35 +14,36 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
+import math
+import os
+
+import cv2
+import numpy as np
+import torch
+from PIL import ImageColor
 from pytorch3d.renderer import (
+    AlphaCompositor,
     BlendParams,
-    blending,
-    look_at_view_transform,
     FoVOrthographicCameras,
-    RasterizationSettings,
+    MeshRasterizer,
+    MeshRenderer,
     PointsRasterizationSettings,
-    PointsRenderer,
-    AlphaCompositor,
     PointsRasterizer,
-    MeshRenderer,
-    MeshRasterizer,
+    PointsRenderer,
+    RasterizationSettings,
     SoftSilhouetteShader,
    TexturesVertex,
+    blending,
+    look_at_view_transform,
 )
 from pytorch3d.renderer.mesh import TexturesVertex
 from pytorch3d.structures import Meshes
-from lib.dataset.mesh_util import get_visibility
-from lib.common.imutils import blend_rgb_norm
+from termcolor import colored
+from tqdm import tqdm
 
 import lib.common.render_utils as util
-import numpy as np
-import torch
-from PIL import ImageColor
-from tqdm import tqdm
-import os
-import cv2
-import math
-from termcolor import colored
+from lib.common.imutils import blend_rgb_norm
+from lib.dataset.mesh_util import get_visibility
 
 
 def image2vid(images, vid_path):
@@ -58,7 +59,7 @@ def image2vid(images, vid_path):
     video.release()
 
 
-def query_color(verts, faces, image, device):
+def query_color(verts, faces, image, device, paint_normal=True):
     """query colors from points and image
 
     Args:
@@ -77,16 +78,16 @@ def query_color(verts, faces, image, device):
     visibility = get_visibility(xy, z, faces[:, [0, 2, 1]]).flatten()
     uv = xy.unsqueeze(0).unsqueeze(2)    # [B, N, 2]
     uv = uv * torch.tensor([1.0, -1.0]).type_as(uv)
-    colors = (
-        (
-            torch.nn.functional.grid_sample(image, uv, align_corners=True)[0, :, :, 0].permute(1, 0)
-            + 1.0
-        ) * 0.5 * 255.0
-    )
+    colors = ((
+        torch.nn.functional.grid_sample(image, uv, align_corners=True)[0, :, :, 0].permute(1, 0) +
+        1.0
+    ) * 0.5 * 255.0)
+    if paint_normal:
+        colors[visibility == 0.0] = ((
+            Meshes(verts.unsqueeze(0), faces.unsqueeze(0)).verts_normals_padded().squeeze(0) + 1.0
+        ) * 0.5 * 255.0)[visibility == 0.0]
+    else:
+        colors[visibility == 0.0] = torch.tensor([0.0, 0.0, 0.0]).to(device)
 
     return colors.detach().cpu()
@@ -121,31 +122,25 @@ class Render:
         self.step = 3
 
         self.cam_pos = {
+            "front":
+                torch.tensor([
+                    (0, self.mesh_y_center, self.dis),
+                    (0, self.mesh_y_center, -self.dis),
+                ]), "frontback":
+                torch.tensor([
+                    (0, self.mesh_y_center, self.dis),
+                    (0, self.mesh_y_center, -self.dis),
+                ]), "four":
+                torch.tensor([
+                    (0, self.mesh_y_center, self.dis),
+                    (self.dis, self.mesh_y_center, 0),
+                    (0, self.mesh_y_center, -self.dis),
+                    (-self.dis, self.mesh_y_center, 0),
+                ]), "around":
+                torch.tensor([(
+                    100.0 * math.cos(np.pi / 180 * angle), self.mesh_y_center,
+                    100.0 * math.sin(np.pi / 180 * angle)
+                ) for angle in range(0, 360, self.step)])
         }
 
         self.type = "color"
@@ -315,7 +310,7 @@ class Render:
             save_path,
             fourcc,
             self.fps,
-            (width*3, int(height)),
+            (width * 3, int(height)),
         )
 
         pbar = tqdm(range(len(self.meshes)))
@@ -352,15 +347,13 @@ class Render:
         for cam_id in pbar:
             img_raw = data["img_raw"]
             num_obj = len(mesh_renders) // 2
-            img_smpl = blend_rgb_norm(
-                (torch.stack(mesh_renders)[:num_obj, cam_id] - 0.5) * 2.0, data
-            )
+            img_smpl = blend_rgb_norm((torch.stack(mesh_renders)[:num_obj, cam_id] - 0.5) * 2.0,
+                                      data)
+            img_cloth = blend_rgb_norm((torch.stack(mesh_renders)[num_obj:, cam_id] - 0.5) * 2.0,
+                                       data)
+            final_img = torch.cat([img_raw, img_smpl, img_cloth],
+                                  dim=-1).squeeze(0).permute(1, 2, 0).numpy().astype(np.uint8)
+
             video.write(final_img[:, :, ::-1])
 
         video.release()
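query_color() now takes a paint_normal flag: visible vertices still sample the input image with grid_sample, while occluded vertices receive either their vertex normal as a color (paint_normal=True) or plain black. A self-contained sketch of the sampling step itself, using random placeholder data for the image and projected coordinates:

# Sketch of the per-vertex color lookup inside query_color(): UVs in [-1, 1]
# sample the normalized image, and the result is mapped to [0, 255].
import torch
import torch.nn.functional as F

image = torch.rand(1, 3, 512, 512) * 2.0 - 1.0    # normalized RGB in [-1, 1]
xy = torch.rand(1000, 2) * 2.0 - 1.0              # projected vertex coords in [-1, 1]
uv = xy.unsqueeze(0).unsqueeze(2)                 # [B=1, N, 1, 2] grid for grid_sample
uv = uv * torch.tensor([1.0, -1.0]).type_as(uv)   # flip y to image convention
colors = ((F.grid_sample(image, uv, align_corners=True)[0, :, :, 0].permute(1, 0) + 1.0)
          * 0.5 * 255.0)                          # [N, 3] colors in [0, 255]
print(colors.shape)    # torch.Size([1000, 3])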
lib/common/render_utils.py
CHANGED
@@ -14,13 +14,15 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-import torch
-from torch import nn
-import trimesh
 import math
 from typing import NewType
-
+
+import numpy as np
+import torch
+import trimesh
 from pytorch3d.renderer.mesh import rasterize_meshes
+from pytorch3d.structures import Meshes
+from torch import nn
 
 Tensor = NewType("Tensor", torch.Tensor)
 
@@ -125,8 +127,6 @@ def batch_contains(verts, faces, points):
 
 
 def dict2obj(d):
-    # if isinstance(d, list):
-    #     d = [dict2obj(x) for x in d]
     if not isinstance(d, dict):
         return d
 
@@ -161,7 +161,9 @@ class Pytorch3dRasterizer(nn.Module):
     x,y,z are in image space, normalized
     can only render squared image now
     """
-    def __init__(self, image_size=224, blur_radius=0.0, faces_per_pixel=1):
+    def __init__(
+        self, image_size=224, blur_radius=0.0, faces_per_pixel=1, device=torch.device("cuda:0")
+    ):
        """
        use fixed raster_settings for rendering faces
        """
@@ -177,6 +179,7 @@ class Pytorch3dRasterizer(nn.Module):
         }
         raster_settings = dict2obj(raster_settings)
         self.raster_settings = raster_settings
+        self.device = device
 
     def forward(self, vertices, faces, attributes=None):
         fixed_vertices = vertices.clone()
@@ -209,3 +212,15 @@ class Pytorch3dRasterizer(nn.Module):
         pixel_vals = pixel_vals[:, :, :, 0].permute(0, 3, 1, 2)
         pixel_vals = torch.cat([pixel_vals, vismask[:, :, :, 0][:, None, :, :]], dim=1)
         return pixel_vals
+
+    def get_texture(self, uvcoords, uvfaces, verts, faces, verts_color):
+
+        batch_size = verts.shape[0]
+        uv_verts_color = face_vertices(verts_color, faces.expand(batch_size, -1,
+                                                                 -1)).to(self.device)
+        uv_map = self.forward(
+            uvcoords.expand(batch_size, -1, -1), uvfaces.expand(batch_size, -1, -1), uv_verts_color
+        )[:, :3]
+        uv_map_npy = np.flip(uv_map.squeeze(0).permute(1, 2, 0).cpu().numpy(), 0)
+
+        return uv_map_npy
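The new get_texture() method leans on face_vertices(), which this commit consolidates here: mesh_util.py (further down) now imports it from render_utils instead of keeping its own copy. A minimal self-contained sketch of that helper, with its body taken from the removed mesh_util version:

# face_vertices gathers per-vertex attributes into per-face layout:
# [bs, nv, 3] vertices plus [bs, nf, 3] face indices -> [bs, nf, 3, 3].
import torch

def face_vertices(vertices, faces):
    bs, nv = vertices.shape[:2]
    device = vertices.device
    # offset each batch's face indices into the flattened vertex array
    faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
    vertices = vertices.reshape((bs * nv, vertices.shape[-1]))
    return vertices[faces.long()]

verts = torch.rand(1, 4, 3)                       # 4 vertices
faces = torch.tensor([[[0, 1, 2], [0, 2, 3]]])    # 2 triangles
print(face_vertices(verts, faces).shape)          # torch.Size([1, 2, 3, 3])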
lib/common/seg3d_lossless.py
CHANGED
@@ -14,19 +14,16 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-from .seg3d_utils import (
-    create_grid3D,
-    plot_mask3D,
-    SmoothConv3D,
-)
+import logging
 
+import numpy as np
 import torch
 import torch.nn as nn
-import numpy as np
 import torch.nn.functional as F
-import logging
 from pytorch3d.ops.marching_cubes import marching_cubes
 
+from .seg3d_utils import SmoothConv3D, create_grid3D, plot_mask3D
+
 logging.getLogger("lightning").setLevel(logging.ERROR)
 
 
@@ -378,10 +375,8 @@ class Seg3dLossless(nn.Module):
 
            with torch.no_grad():
                # conflicts
-                conflicts = (
-                    (occupancys_interp - self.balance_value) *
-                    (occupancys_topk - self.balance_value) < 0
-                )[0, 0]
+                conflicts = ((occupancys_interp - self.balance_value) *
+                             (occupancys_topk - self.balance_value) < 0)[0, 0]
 
             if self.visualize:
                 self.plot(occupancys, coords, final_D, final_H, final_W)
@@ -407,12 +402,9 @@ class Seg3dLossless(nn.Module):
                     title="conflicts",
                 )
 
-                conflicts_boundary = (
-                    (
-                        conflicts_coords.int() +
-                        self.gird8_offsets.unsqueeze(1) * stride.int()
-                    ).reshape(-1, 3).long().unique(dim=0)
-                )
+                conflicts_boundary = ((
+                    conflicts_coords.int() + self.gird8_offsets.unsqueeze(1) * stride.int()
+                ).reshape(-1, 3).long().unique(dim=0))
                 conflicts_boundary[:, 0] = conflicts_boundary[:, 0].clamp(
                     0,
                     calculated.size(2) - 1
@@ -466,10 +458,8 @@ class Seg3dLossless(nn.Module):
 
             with torch.no_grad():
                 # conflicts
-                conflicts = (
-                    (occupancys_interp - self.balance_value) *
-                    (occupancys_topk - self.balance_value) < 0
-                )[0, 0]
+                conflicts = ((occupancys_interp - self.balance_value) *
+                             (occupancys_topk - self.balance_value) < 0)[0, 0]
 
             # put mask point predictions to the right places on the upsampled grid.
             point_indices = point_indices.unsqueeze(1).expand(-1, C, -1)
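Both reflowed hunks compute the same conflict mask: a grid point conflicts when the interpolated occupancy and the re-evaluated top-k occupancy land on opposite sides of balance_value, i.e. when the product of their signed offsets is negative. A worked example with balance_value = 0.5 and toy values:

# Sign-disagreement test behind the "conflicts" expression above.
import torch

balance_value = 0.5
occupancys_interp = torch.tensor([[[0.2, 0.7, 0.9, 0.4]]])    # [1, 1, N] interpolated
occupancys_topk   = torch.tensor([[[0.6, 0.8, 0.1, 0.3]]])    # [1, 1, N] re-evaluated
conflicts = ((occupancys_interp - balance_value) *
             (occupancys_topk - balance_value) < 0)[0, 0]
print(conflicts)    # tensor([ True, False,  True, False])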
lib/common/seg3d_utils.py
CHANGED
@@ -14,10 +14,10 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
+import matplotlib.pyplot as plt
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import matplotlib.pyplot as plt
 
 
 def plot_mask2D(mask, title="", point_coords=None, figsize=10, point_marker_size=5):
@@ -140,9 +140,8 @@ class SmoothConv2D(nn.Module):
         assert kernel_size % 2 == 1, "kernel_size for smooth_conv must be odd: {3, 5, ...}"
         self.padding = (kernel_size - 1) // 2
 
-        weight = torch.ones(
-            (in_channels, out_channels, kernel_size, kernel_size), dtype=torch.float32
-        ) / (kernel_size**2)
+        weight = torch.ones((in_channels, out_channels, kernel_size, kernel_size),
+                            dtype=torch.float32) / (kernel_size**2)
         self.register_buffer('weight', weight)
 
     def forward(self, input):
@@ -155,9 +154,8 @@ class SmoothConv3D(nn.Module):
         assert kernel_size % 2 == 1, "kernel_size for smooth_conv must be odd: {3, 5, ...}"
         self.padding = (kernel_size - 1) // 2
 
-        weight = torch.ones(
-            (in_channels, out_channels, kernel_size, kernel_size, kernel_size), dtype=torch.float32
-        ) / (kernel_size**3)
+        weight = torch.ones((in_channels, out_channels, kernel_size, kernel_size, kernel_size),
+                            dtype=torch.float32) / (kernel_size**3)
         self.register_buffer('weight', weight)
 
     def forward(self, input):
@@ -185,9 +183,8 @@ def build_smooth_conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)
         kernel_size=kernel_size,
         padding=padding
     )
-    smooth_conv.weight.data = torch.ones(
-        (in_channels, out_channels, kernel_size, kernel_size), dtype=torch.float32
-    ) / (kernel_size**2)
+    smooth_conv.weight.data = torch.ones((in_channels, out_channels, kernel_size, kernel_size),
+                                         dtype=torch.float32) / (kernel_size**2)
     smooth_conv.bias.data = torch.zeros(out_channels)
     return smooth_conv
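All three reflowed weight constructions build the same thing: a constant box kernel normalized by its volume, so the convolution computes a local average. A self-contained check for the 3D case:

# A unit spike averaged by the box kernel spreads to 1/k^3 per neighbor,
# so the center of the output recovers exactly 1.0.
import torch
import torch.nn.functional as F

in_channels, out_channels, kernel_size = 1, 1, 3
weight = torch.ones((in_channels, out_channels, kernel_size, kernel_size, kernel_size),
                    dtype=torch.float32) / (kernel_size**3)
vol = torch.zeros(1, 1, 5, 5, 5)
vol[0, 0, 2, 2, 2] = 27.0                      # one spike worth 27 units
out = F.conv3d(vol, weight, padding=(kernel_size - 1) // 2)
print(out[0, 0, 2, 2, 2])                      # tensor(1.) at the center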
lib/common/train_util.py
CHANGED
@@ -14,11 +14,12 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
+import pytorch_lightning as pl
 import torch
+from termcolor import colored
+
 from ..dataset.mesh_util import *
 from ..net.geometry import orthogonal
-from termcolor import colored
-import pytorch_lightning as pl
 
 
 class Format:
@@ -30,50 +31,23 @@ def init_loss():
 
     losses = {
         # Cloth: chamfer distance
-        "cloth": {
-            "weight": 1e3,
-            "value": 0.0
-        },
+        "cloth": {"weight": 1e3, "value": 0.0},
         # Stiffness: [RT]_v1 - [RT]_v2 (v1-edge-v2)
-        "stiff": {
-            "weight": 1e5,
-            "value": 0.0
-        },
+        "stiff": {"weight": 1e5, "value": 0.0},
         # Cloth: det(R) = 1
-        "rigid": {
-            "weight": 1e5,
-            "value": 0.0
-        },
+        "rigid": {"weight": 1e5, "value": 0.0},
         # Cloth: edge length
-        "edge": {
-            "weight": 0,
-            "value": 0.0
-        },
+        "edge": {"weight": 0, "value": 0.0},
         # Cloth: normal consistency
-        "nc": {
-            "weight": 0,
-            "value": 0.0
-        },
+        "nc": {"weight": 0, "value": 0.0},
         # Cloth: laplacian smoonth
-        "lapla": {
-            "weight": 1e2,
-            "value": 0.0
-        },
+        "lapla": {"weight": 1e2, "value": 0.0},
         # Body: Normal_pred - Normal_smpl
-        "normal": {
-            "weight": 1e0,
-            "value": 0.0
-        },
+        "normal": {"weight": 1e0, "value": 0.0},
         # Body: Silhouette_pred - Silhouette_smpl
-        "silhouette": {
-            "weight": 1e0,
-            "value": 0.0
-        },
+        "silhouette": {"weight": 1e0, "value": 0.0},
         # Joint: reprojected joints difference
-        "joint": {
-            "weight": 5e0,
-            "value": 0.0
-        },
+        "joint": {"weight": 5e0, "value": 0.0},
     }
 
     return losses
@@ -143,9 +117,9 @@ def query_func_IF(batch, netG, points):
 
 
 def batch_mean(res, key):
-    return torch.stack(
-        [x[key] if torch.is_tensor(x[key]) else torch.as_tensor(x[key]) for x in res]
-    ).mean()
+    return torch.stack([
+        x[key] if torch.is_tensor(x[key]) else torch.as_tensor(x[key]) for x in res
+    ]).mean()
 
 
 def accumulate(outputs, rot_num, split):
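The flattening leaves init_loss()'s shape intact: each term carries a fixed "weight" and a running "value". A hedged usage sketch, assuming the repo root is on PYTHONPATH; the filled-in values are placeholders, not numbers from the code:

# Consuming init_loss(): update per-term values each iteration, then combine.
from lib.common.train_util import init_loss

losses = init_loss()
losses["cloth"]["value"] = 0.012    # e.g. this iteration's chamfer distance
losses["stiff"]["value"] = 3e-6     # e.g. this iteration's stiffness residual
total = sum(term["weight"] * term["value"] for term in losses.values())
print(f"weighted total: {total:.4f}")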
lib/common/voxelize.py
CHANGED
@@ -1,15 +1,14 @@
-import trimesh
-import numpy as np
 import os
 import traceback
 
-import torch
 import numpy as np
+import torch
 import trimesh
 from scipy import ndimage
 from skimage.measure import block_reduce
+
 from lib.common.libmesh.inside_mesh import check_mesh_contains
+from lib.common.libvoxelize.voxelize import voxelize_mesh_
 
 # From Occupancy Networks, Mescheder et. al. CVPR'19
 
@@ -147,76 +146,63 @@ class VoxelGrid:
         f2_r_x, f2_r_y, f2_r_z = np.where(f2_r)
         f3_r_x, f3_r_y, f3_r_z = np.where(f3_r)
 
+        faces_1_l = np.stack([
+            v_idx[f1_l_x, f1_l_y, f1_l_z],
+            v_idx[f1_l_x, f1_l_y, f1_l_z + 1],
+            v_idx[f1_l_x, f1_l_y + 1, f1_l_z + 1],
+            v_idx[f1_l_x, f1_l_y + 1, f1_l_z],
+        ],
+                             axis=1)
+
+        faces_1_r = np.stack([
+            v_idx[f1_r_x, f1_r_y, f1_r_z],
+            v_idx[f1_r_x, f1_r_y + 1, f1_r_z],
+            v_idx[f1_r_x, f1_r_y + 1, f1_r_z + 1],
+            v_idx[f1_r_x, f1_r_y, f1_r_z + 1],
+        ],
+                             axis=1)
+
+        faces_2_l = np.stack([
+            v_idx[f2_l_x, f2_l_y, f2_l_z],
+            v_idx[f2_l_x + 1, f2_l_y, f2_l_z],
+            v_idx[f2_l_x + 1, f2_l_y, f2_l_z + 1],
+            v_idx[f2_l_x, f2_l_y, f2_l_z + 1],
+        ],
+                             axis=1)
+
+        faces_2_r = np.stack([
+            v_idx[f2_r_x, f2_r_y, f2_r_z],
+            v_idx[f2_r_x, f2_r_y, f2_r_z + 1],
+            v_idx[f2_r_x + 1, f2_r_y, f2_r_z + 1],
+            v_idx[f2_r_x + 1, f2_r_y, f2_r_z],
+        ],
+                             axis=1)
+
+        faces_3_l = np.stack([
+            v_idx[f3_l_x, f3_l_y, f3_l_z],
+            v_idx[f3_l_x, f3_l_y + 1, f3_l_z],
+            v_idx[f3_l_x + 1, f3_l_y + 1, f3_l_z],
+            v_idx[f3_l_x + 1, f3_l_y, f3_l_z],
+        ],
+                             axis=1)
+
+        faces_3_r = np.stack([
+            v_idx[f3_r_x, f3_r_y, f3_r_z],
+            v_idx[f3_r_x + 1, f3_r_y, f3_r_z],
+            v_idx[f3_r_x + 1, f3_r_y + 1, f3_r_z],
+            v_idx[f3_r_x, f3_r_y + 1, f3_r_z],
+        ],
+                             axis=1)
+
+        faces = np.concatenate([
+            faces_1_l,
+            faces_1_r,
+            faces_2_l,
+            faces_2_r,
+            faces_3_l,
+            faces_3_r,
+        ],
+                               axis=0)
 
         vertices = self.loc + self.scale * vertices
         mesh = trimesh.Trimesh(vertices, faces, process=False)
lib/dataset/EvalDataset.py
CHANGED
@@ -14,22 +14,24 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-import torch.nn.functional as F
-from lib.common.render import Render
-from lib.dataset.mesh_util import (SMPLX, projection, rescale_smpl, HoppeMesh)
-import os.path as osp
-import numpy as np
-from PIL import Image
 import os
+import os.path as osp
+
 import cv2
-import trimesh
+import numpy as np
 import torch
+import torch.nn.functional as F
 import torchvision.transforms as transforms
+import trimesh
+from PIL import Image
+
+from lib.common.render import Render
+from lib.dataset.mesh_util import SMPLX, HoppeMesh, projection, rescale_smpl
 
 cape_gender = {
     "male":
+    ['00032', '00096', '00122', '00127', '00145', '00215', '02474', '03284', '03375',
+     '03394'], "female": ['00134', '00159', '03223', '03331', '03383']
 }
 
 
@@ -74,30 +76,27 @@ class EvalDataset:
                 "scale": self.scales[dataset_id],
             }
 
-            self.datasets_dict[dataset].update(
-                {"subjects": np.loadtxt(osp.join(dataset_dir, "all.txt"), dtype=str)}
-            )
+            self.datasets_dict[dataset].update({
+                "subjects":
+                np.loadtxt(osp.join(dataset_dir, "all.txt"), dtype=str)
+            })
 
         self.subject_list = self.get_subject_list()
         self.smplx = SMPLX()
 
         # PIL to tensor
-        self.image_to_tensor = transforms.Compose(
-            [
-                transforms.Resize(self.input_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-            ]
-        )
+        self.image_to_tensor = transforms.Compose([
+            transforms.Resize(self.input_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ])
 
         # PIL to tensor
-        self.mask_to_tensor = transforms.Compose(
-            [
-                transforms.Resize(self.input_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.0, ), (1.0, )),
-            ]
-        )
+        self.mask_to_tensor = transforms.Compose([
+            transforms.Resize(self.input_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.0, ), (1.0, )),
+        ])
 
         self.device = device
         self.render = Render(size=512, device=self.device)
@@ -154,27 +153,23 @@
             }
 
             if dataset == "cape":
-                data_dict.update(
-                    {
-                        "mesh_path": osp.join(self.datasets_dict[dataset]["mesh_dir"], f"{subject}.obj"),
-                        "smpl_path": osp.join(self.datasets_dict[dataset]["smpl_dir"], f"{subject}.obj"),
-                    }
-                )
+                data_dict.update({
+                    "mesh_path":
+                    osp.join(self.datasets_dict[dataset]["mesh_dir"], f"{subject}.obj"),
+                    "smpl_path":
+                    osp.join(self.datasets_dict[dataset]["smpl_dir"], f"{subject}.obj"),
+                })
             else:
 
-                data_dict.update(
-                    {
-                        "mesh_path": osp.join(self.datasets_dict[dataset]["mesh_dir"], f"{subject}.obj"),
-                        "smplx_path": osp.join(self.datasets_dict[dataset]["smplx_dir"], f"{subject}.obj"),
-                    }
-                )
+                data_dict.update({
+                    "mesh_path":
+                    osp.join(
+                        self.datasets_dict[dataset]["mesh_dir"],
+                        f"{subject}.obj",
+                    ),
+                    "smplx_path":
+                    osp.join(self.datasets_dict[dataset]["smplx_dir"], f"{subject}.obj"),
+                })
 
             # load training data
             data_dict.update(self.load_calib(data_dict))
@@ -183,18 +178,17 @@
         for name, channel in zip(self.in_total, self.in_total_dim):
 
             if f"{name}_path" not in data_dict.keys():
-                data_dict.update(
-                    {
-                        f"{name}_path": osp.join(self.root, render_folder, name, f"{rotation:03d}.png")
-                    }
-                )
+                data_dict.update({
+                    f"{name}_path":
+                    osp.join(self.root, render_folder, name, f"{rotation:03d}.png")
+                })
 
             # tensor update
             if os.path.exists(data_dict[f"{name}_path"]):
-                data_dict.update(
-                    {name: self.imagepath2tensor(data_dict[f"{name}_path"], channel, inv=False)}
-                )
+                data_dict.update({
+                    name:
+                    self.imagepath2tensor(data_dict[f"{name}_path"], channel, inv=False)
+                })
 
         data_dict.update(self.load_mesh(data_dict))
         data_dict.update(self.load_smpl(data_dict))
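The two Compose pipelines reformatted above differ only in normalization: images map to [-1, 1], masks stay in [0, 1]. A self-contained sketch; input_size = 512 is an assumption here (the dataset reads it from its config):

# The image pipeline: resize, to-tensor ([0, 1]), then shift/scale to [-1, 1].
import torchvision.transforms as transforms
from PIL import Image

input_size = 512    # assumed value for illustration
image_to_tensor = transforms.Compose([
    transforms.Resize(input_size),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
img = Image.new("RGB", (768, 768), (255, 128, 0))
t = image_to_tensor(img)
print(t.min().item(), t.max().item())    # -1.0 ... 1.0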
lib/dataset/Evaluator.py
CHANGED
@@ -14,20 +14,21 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-from lib.dataset.mesh_util import projection
-from lib.common.render import Render
+from typing import Tuple
+
 import numpy as np
 import torch
-from torchvision.utils import make_grid
+from PIL import Image
 from pytorch3d import _C
+from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
+from pytorch3d.ops.packed_to_padded import packed_to_padded
+from pytorch3d.structures import Pointclouds
 from torch.autograd import Function
 from torch.autograd.function import once_differentiable
-from pytorch3d.structures import Pointclouds
-from PIL import Image
+from torchvision.utils import make_grid
 
-from typing import Tuple
-from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
-from pytorch3d.ops.packed_to_padded import packed_to_padded
+from lib.common.render import Render
+from lib.dataset.mesh_util import projection
 
 _DEFAULT_MIN_TRIANGLE_AREA: float = 5e-3
 
@@ -278,12 +279,10 @@ class Evaluator:
 
         # error_hf = ((((src_normal_arr - tgt_normal_arr) * sim_mask)**2).sum(dim=0).mean()) * 4.0
 
-        normal_img = Image.fromarray(
-            (
-                torch.cat([src_normal_arr, tgt_normal_arr],
-                          dim=1).permute(1, 2, 0).detach().cpu().numpy() * 255.0
-            ).astype(np.uint8)
-        )
+        normal_img = Image.fromarray((
+            torch.cat([src_normal_arr, tgt_normal_arr],
+                      dim=1).permute(1, 2, 0).detach().cpu().numpy() * 255.0
+        ).astype(np.uint8))
        normal_img.save(normal_path)
 
         return error
lib/dataset/NormalDataset.py
CHANGED
@@ -14,12 +14,13 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-import kornia
 import os.path as osp
+
+import kornia
 import numpy as np
+import torchvision.transforms as transforms
 from PIL import Image
 from termcolor import colored
-import torchvision.transforms as transforms
 
 
 class NormalDataset:
@@ -59,22 +60,18 @@ class NormalDataset:
         self.subject_list = self.get_subject_list(split)
 
         # PIL to tensor
-        self.image_to_tensor = transforms.Compose(
-            [
-                transforms.Resize(self.input_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
-            ]
-        )
+        self.image_to_tensor = transforms.Compose([
+            transforms.Resize(self.input_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+        ])
 
         # PIL to tensor
-        self.mask_to_tensor = transforms.Compose(
-            [
-                transforms.Resize(self.input_size),
-                transforms.ToTensor(),
-                transforms.Normalize((0.0, ), (1.0, )),
-            ]
-        )
+        self.mask_to_tensor = transforms.Compose([
+            transforms.Resize(self.input_size),
+            transforms.ToTensor(),
+            transforms.Normalize((0.0, ), (1.0, )),
+        ])
 
     def get_subject_list(self, split):
 
@@ -128,21 +125,15 @@ class NormalDataset:
         for name, channel in zip(self.in_total, self.in_total_dim):
 
             if f"{name}_path" not in data_dict.keys():
-                data_dict.update(
-                    {
-                        f"{name}_path": osp.join(self.root, render_folder, name, f"{rotation:03d}.png")
-                    }
-                )
-
-            data_dict.update(
-                {
-                    name:
-                    self.imagepath2tensor(
-                        data_dict[f"{name}_path"], channel, inv=False, erasing=False
-                    )
-                }
-            )
+                data_dict.update({
+                    f"{name}_path":
+                    osp.join(self.root, render_folder, name, f"{rotation:03d}.png")
+                })
+
+            data_dict.update({
+                name:
+                self.imagepath2tensor(data_dict[f"{name}_path"], channel, inv=False, erasing=False)
+            })
 
         path_keys = [key for key in data_dict.keys() if "_path" in key or "_dir" in key]
lib/dataset/NormalModule.py
CHANGED
@@ -14,11 +14,11 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-from torch.utils.data import DataLoader
-from lib.dataset.NormalDataset import NormalDataset
-
 # pytorch lightning related libs
 import pytorch_lightning as pl
+from torch.utils.data import DataLoader
+
+from lib.dataset.NormalDataset import NormalDataset
 
 
 class NormalModule(pl.LightningDataModule):
lib/dataset/PointFeat.py
CHANGED
@@ -1,5 +1,6 @@
-from pytorch3d.structures import Meshes, Pointclouds
 import torch
+from pytorch3d.structures import Meshes, Pointclouds
+
 from lib.common.render_utils import face_vertices
 from lib.dataset.Evaluator import point_mesh_distance
 from lib.dataset.mesh_util import SMPLX, barycentric_coordinates_of_projection
lib/dataset/TestDataset.py
CHANGED
@@ -14,37 +14,34 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-import warnings
 import logging
+import warnings
 
 warnings.filterwarnings("ignore")
 logging.getLogger("lightning").setLevel(logging.ERROR)
 logging.getLogger("trimesh").setLevel(logging.ERROR)
 
-from lib.pixielib.utils.config import cfg as pixie_cfg
-from lib.pixielib.pixie import PIXIE
-from lib.pixielib.models.SMPLX import SMPLX as PIXIE_SMPLX
-from lib.common.imutils import process_image
-from lib.common.train_util import Format
-from lib.net.geometry import rotation_matrix_to_angle_axis, rot6d_to_rotmat
-
-from lib.pymafx.core import path_config
-from lib.pymafx.models import pymaf_net
-
-from lib.common.config import cfg
-from lib.common.render import Render
-from lib.dataset.body_model import TetraSMPLModel
-from lib.dataset.mesh_util import get_visibility, SMPLX
+import glob
+import os.path as osp
+
+import numpy as np
+import torch
 import torch.nn.functional as F
+from PIL import ImageFile
+from termcolor import colored
 from torchvision import transforms
 from torchvision.models import detection
 
-import os.path as osp
-import glob
-import numpy as np
-import torch
-from termcolor import colored
-from PIL import ImageFile
+from lib.common.config import cfg
+from lib.common.imutils import process_image
+from lib.common.render import Render
+from lib.common.train_util import Format
+from lib.dataset.mesh_util import SMPLX, get_visibility
+from lib.pixielib.models.SMPLX import SMPLX as PIXIE_SMPLX
+from lib.pixielib.pixie import PIXIE
+from lib.pixielib.utils.config import cfg as pixie_cfg
+from lib.pymafx.core import path_config
+from lib.pymafx.models import pymaf_net
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 
@@ -66,9 +63,8 @@ class TestDataset:
         keep_lst = sorted(glob.glob(f"{self.image_dir}/*"))
         img_fmts = ["jpg", "png", "jpeg", "JPG", "bmp", "exr"]
 
-        self.subject_list = sorted(
-            [item for item in keep_lst if item.split(".")[-1] in img_fmts], reverse=False
-        )
+        self.subject_list = sorted([item for item in keep_lst if item.split(".")[-1] in img_fmts],
+                                   reverse=False)
 
         # smpl related
         self.smpl_data = SMPLX()
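The reformatted subject_list comprehension is a plain extension filter over the glob results; a tiny self-contained example:

# Keep only paths whose suffix is a known image format, sorted ascending.
keep_lst = ["data/a.png", "data/b.txt", "data/c.jpg", "data/d.JPG"]
img_fmts = ["jpg", "png", "jpeg", "JPG", "bmp", "exr"]
subject_list = sorted([item for item in keep_lst if item.split(".")[-1] in img_fmts],
                      reverse=False)
print(subject_list)    # ['data/a.png', 'data/c.jpg', 'data/d.JPG']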
lib/dataset/body_model.py
CHANGED
@@ -14,10 +14,11 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-import numpy as np
 import os
 import pickle
+
+import numpy as np
 import torch
-import os
 
 
 class SMPLModel:
@@ -126,12 +127,10 @@ class SMPLModel:
         for i in range(1, self.kintree_table.shape[1]):
             G[i] = G[self.parent[i]].dot(
                 self.with_zeros(
-                    np.hstack(
-                        [
-                            self.R[i],
-                            ((self.J[i, :] - self.J[self.parent[i], :]).reshape([3, 1])),
-                        ]
-                    )
+                    np.hstack([
+                        self.R[i],
+                        ((self.J[i, :] - self.J[self.parent[i], :]).reshape([3, 1])),
+                    ])
                 )
             )
         # remove the transformation due to the rest pose
@@ -163,19 +162,17 @@ class SMPLModel:
         r_hat = r / theta
         cos = np.cos(theta)
         z_stick = np.zeros(theta.shape[0])
-        m = np.dstack(
-            [
-                z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
-                -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
-            ]
-        ).reshape([-1, 3, 3])
+        m = np.dstack([
+            z_stick,
+            -r_hat[:, 0, 2],
+            r_hat[:, 0, 1],
+            r_hat[:, 0, 2],
+            z_stick,
+            -r_hat[:, 0, 0],
+            -r_hat[:, 0, 1],
+            r_hat[:, 0, 0],
+            z_stick,
+        ]).reshape([-1, 3, 3])
         i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3])
         A = np.transpose(r_hat, axes=[0, 2, 1])
         B = r_hat
@@ -357,12 +354,10 @@ class TetraSMPLModel:
         for i in range(1, self.kintree_table.shape[1]):
             G[i] = G[self.parent[i]].dot(
                 self.with_zeros(
-                    np.hstack(
-                        [
-                            self.R[i],
-                            ((self.J[i, :] - self.J[self.parent[i], :]).reshape([3, 1])),
-                        ]
-                    )
+                    np.hstack([
+                        self.R[i],
+                        ((self.J[i, :] - self.J[self.parent[i], :]).reshape([3, 1])),
+                    ])
                 )
             )
         # remove the transformation due to the rest pose
@@ -398,19 +393,17 @@ class TetraSMPLModel:
         r_hat = r / theta
         cos = np.cos(theta)
         z_stick = np.zeros(theta.shape[0])
-        m = np.dstack(
-            [
-                z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
-                -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick
-            ]
-        ).reshape([-1, 3, 3])
+        m = np.dstack([
+            z_stick,
+            -r_hat[:, 0, 2],
+            r_hat[:, 0, 1],
+            r_hat[:, 0, 2],
+            z_stick,
+            -r_hat[:, 0, 0],
+            -r_hat[:, 0, 1],
+            r_hat[:, 0, 0],
+            z_stick,
+        ]).reshape([-1, 3, 3])
         i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3])
         A = np.transpose(r_hat, axes=[0, 2, 1])
         B = r_hat
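The reflowed np.dstack in both rodrigues() hunks builds the skew-symmetric cross-product matrix of each normalized axis r_hat; together with i_cube and the outer product of A and B from the surrounding context lines, the function assembles Rodrigues' formula R = cos(theta)*I + (1 - cos(theta))*r_hat r_hat^T + sin(theta)*[r_hat]_x. A worked, self-contained example for a 90-degree rotation about z:

# The dstack of nine [N]-shaped slices reshapes into N skew matrices [r_hat]_x.
import numpy as np

r = np.array([[[0.0, 0.0, np.pi / 2]]])                  # [N=1, 1, 3] axis-angle
theta = np.linalg.norm(r, axis=(1, 2), keepdims=True)    # [N, 1, 1]
r_hat = r / theta                                        # unit rotation axis
cos = np.cos(theta)
z_stick = np.zeros(theta.shape[0])
m = np.dstack([
    z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1],
    r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0],
    -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick,
]).reshape([-1, 3, 3])                                   # [r_hat]_x
i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3])
dot = np.matmul(np.transpose(r_hat, axes=[0, 2, 1]), r_hat)    # r_hat r_hat^T
R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m
print(np.round(R[0], 3))    # [[0,-1,0],[1,0,0],[0,0,1]] -- maps x onto y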
lib/dataset/mesh_util.py
CHANGED
|
@@ -14,25 +14,25 @@
|
|
| 14 |
#
|
| 15 |
# Contact: ps-license@tuebingen.mpg.de
|
| 16 |
|
|
|
|
| 17 |
import os
|
|
|
|
|
|
|
|
|
|
| 18 |
import numpy as np
|
|
|
|
| 19 |
import torch
|
|
|
|
| 20 |
import torchvision
|
| 21 |
import trimesh
|
| 22 |
-
import
|
| 23 |
-
import
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
from termcolor import colored
|
| 27 |
from scipy.spatial import cKDTree
|
| 28 |
|
| 29 |
-
from pytorch3d.structures import Meshes
|
| 30 |
-
import torch.nn.functional as F
|
| 31 |
import lib.smplx as smplx
|
| 32 |
-
from lib.common.render_utils import Pytorch3dRasterizer
|
| 33 |
-
from pytorch3d.renderer.mesh import rasterize_meshes
|
| 34 |
-
from PIL import Image, ImageFont, ImageDraw
|
| 35 |
-
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
|
| 36 |
|
| 37 |
|
| 38 |
class Format:
|
|
@@ -74,19 +74,17 @@ class SMPLX:
|
|
| 74 |
self.smplx_vertex_lmkid = np.load(self.smplx_vertex_lmkid_path)
|
| 75 |
|
| 76 |
self.smpl_vert_seg = json.load(open(self.smpl_vert_seg_path))
|
| 77 |
-
self.smpl_mano_vid = np.concatenate(
|
| 78 |
-
[
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
]
|
| 82 |
-
)
|
| 83 |
|
| 84 |
self.smplx_eyeball_fid_mask = np.load(self.smplx_eyeball_fid_path)
|
| 85 |
self.smplx_mouth_fid = np.load(self.smplx_fill_mouth_fid_path)
|
| 86 |
self.smplx_mano_vid_dict = np.load(self.smplx_mano_vid_path, allow_pickle=True)
|
| 87 |
-
self.smplx_mano_vid = np.concatenate(
|
| 88 |
-
|
| 89 |
-
)
|
| 90 |
self.smplx_flame_vid = np.load(self.smplx_flame_vid_path, allow_pickle=True)
|
| 91 |
self.smplx_front_flame_vid = self.smplx_flame_vid[np.load(self.front_flame_path)]
|
| 92 |
|
|
@@ -110,26 +108,22 @@ class SMPLX:
|
|
| 110 |
|
| 111 |
self.model_dir = osp.join(self.current_dir, "models")
|
| 112 |
|
| 113 |
-
self.ghum_smpl_pairs = torch.tensor(
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
]
|
| 120 |
-
).long()
|
| 121 |
|
| 122 |
# smpl-smplx correspondence
|
| 123 |
self.smpl_joint_ids_24 = np.arange(22).tolist() + [68, 73]
|
| 124 |
self.smpl_joint_ids_24_pixie = np.arange(22).tolist() + [61 + 68, 72 + 68]
|
| 125 |
self.smpl_joint_ids_45 = np.arange(22).tolist() + [68, 73] + np.arange(55, 76).tolist()
|
| 126 |
|
| 127 |
-
self.extra_joint_ids = np.array(
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
]
|
| 132 |
-
)
|
| 133 |
|
| 134 |
self.extra_joint_ids += 68
|
| 135 |
|
|
@@ -369,9 +363,9 @@ def mesh_edge_loss(meshes, target_length: float = 0.0):
|
|
| 369 |
return loss_all
|
| 370 |
|
| 371 |
|
| 372 |
-
def remesh_laplacian(mesh, obj_path):
|
| 373 |
|
| 374 |
-
mesh = mesh.simplify_quadratic_decimation(
|
| 375 |
mesh = trimesh.smoothing.filter_humphrey(
|
| 376 |
mesh, alpha=0.1, beta=0.5, iterations=10, laplacian_operator=None
|
| 377 |
)
|
|
@@ -380,7 +374,7 @@ def remesh_laplacian(mesh, obj_path):
|
|
| 380 |
return mesh
|
| 381 |
|
| 382 |
|
| 383 |
-
def poisson(mesh, obj_path, depth=10,
|
| 384 |
|
| 385 |
pcd_path = obj_path[:-4] + "_soups.ply"
|
| 386 |
assert (mesh.vertex_normals.shape[1] == 3)
|
|
@@ -395,12 +389,9 @@ def poisson(mesh, obj_path, depth=10, decimation=True):
|
|
| 395 |
largest_mesh = keep_largest(trimesh.Trimesh(np.array(mesh.vertices), np.array(mesh.triangles)))
|
| 396 |
largest_mesh.export(obj_path)
|
| 397 |
|
| 398 |
-
|
| 399 |
-
|
| 400 |
-
|
| 401 |
-
return low_res_mesh
|
| 402 |
-
else:
|
| 403 |
-
return largest_mesh
|
| 404 |
|
| 405 |
|
| 406 |
# Losses to smooth / regularize the mesh shape
|
|
@@ -437,10 +428,9 @@ def read_smpl_constants(folder):
|
|
| 437 |
smpl_tetras = (np.loadtxt(os.path.join(folder, "tetrahedrons.txt"), dtype=np.int32) - 1)
|
| 438 |
|
| 439 |
return_dict = {
|
| 440 |
-
"smpl_vertex_code": torch.tensor(smpl_vertex_code),
|
| 441 |
-
|
| 442 |
-
|
| 443 |
-
"smpl_tetras": torch.tensor(smpl_tetras)
|
| 444 |
}
|
| 445 |
|
| 446 |
return return_dict
|
|
@@ -598,22 +588,6 @@ def compute_normal(vertices, faces):
|
|
| 598 |
return vert_norms, face_norms
|
| 599 |
|
| 600 |
|
| 601 |
-
def face_vertices(vertices, faces):
|
| 602 |
-
"""
|
| 603 |
-
:param vertices: [batch size, number of vertices, 3]
|
| 604 |
-
:param faces: [batch size, number of faces, 3]
|
| 605 |
-
:return: [batch size, number of faces, 3, 3]
|
| 606 |
-
"""
|
| 607 |
-
|
| 608 |
-
bs, nv = vertices.shape[:2]
|
| 609 |
-
bs, nf = faces.shape[:2]
|
| 610 |
-
device = vertices.device
|
| 611 |
-
faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
|
| 612 |
-
vertices = vertices.reshape((bs * nv, vertices.shape[-1]))
|
| 613 |
-
|
| 614 |
-
return vertices[faces.long()]
|
| 615 |
-
|
| 616 |
-
|
| 617 |
def compute_normal_batch(vertices, faces):
|
| 618 |
|
| 619 |
if faces.shape[0] != vertices.shape[0]:
|
|
@@ -657,20 +631,18 @@ def get_optim_grid_image(per_loop_lst, loss=None, nrow=4, type="smpl"):
|
|
| 657 |
draw.text((10, 5), f"error: {loss:.3f}", (255, 0, 0), font=font)
|
| 658 |
|
| 659 |
if type == "smpl":
|
| 660 |
-
for col_id, col_txt in enumerate(
|
| 661 |
-
|
| 662 |
-
|
| 663 |
-
|
| 664 |
-
|
| 665 |
-
|
| 666 |
-
|
| 667 |
-
]
|
| 668 |
-
):
|
| 669 |
draw.text((10 + (col_id * grid_size), 5), col_txt, (255, 0, 0), font=font)
|
| 670 |
elif type == "cloth":
|
| 671 |
-
for col_id, col_txt in enumerate(
|
| 672 |
-
|
| 673 |
-
):
|
| 674 |
draw.text((10 + (col_id * grid_size), 5), col_txt, (255, 0, 0), font=font)
|
| 675 |
for col_id, col_txt in enumerate(["0", "90", "180", "270"]):
|
| 676 |
draw.text(
|
|
@@ -751,3 +723,61 @@ def get_joint_mesh(joints, radius=2.0):
|
|
| 751 |
else:
|
| 752 |
combined = sum([combined, ball_new])
|
| 753 |
return combined
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
#
|
| 15 |
# Contact: ps-license@tuebingen.mpg.de
|
| 16 |
|
| 17 |
+
import json
|
| 18 |
import os
|
| 19 |
+
import os.path as osp
|
| 20 |
+
|
| 21 |
+
import _pickle as cPickle
|
| 22 |
import numpy as np
|
| 23 |
+
import open3d as o3d
|
| 24 |
import torch
|
| 25 |
+
import torch.nn.functional as F
|
| 26 |
import torchvision
|
| 27 |
import trimesh
|
| 28 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 29 |
+
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
|
| 30 |
+
from pytorch3d.renderer.mesh import rasterize_meshes
|
| 31 |
+
from pytorch3d.structures import Meshes
|
|
|
|
| 32 |
from scipy.spatial import cKDTree
|
| 33 |
|
|
|
|
|
|
|
| 34 |
import lib.smplx as smplx
|
| 35 |
+
from lib.common.render_utils import Pytorch3dRasterizer, face_vertices
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
|
| 38 |
class Format:
|
|
|
|
| 74 |       self.smplx_vertex_lmkid = np.load(self.smplx_vertex_lmkid_path)
| 75 |
| 76 |       self.smpl_vert_seg = json.load(open(self.smpl_vert_seg_path))
| 77 | +     self.smpl_mano_vid = np.concatenate([
| 78 | +         self.smpl_vert_seg["rightHand"], self.smpl_vert_seg["rightHandIndex1"],
| 79 | +         self.smpl_vert_seg["leftHand"], self.smpl_vert_seg["leftHandIndex1"]
| 80 | +     ])
| 81 |
| 82 |       self.smplx_eyeball_fid_mask = np.load(self.smplx_eyeball_fid_path)
| 83 |       self.smplx_mouth_fid = np.load(self.smplx_fill_mouth_fid_path)
| 84 |       self.smplx_mano_vid_dict = np.load(self.smplx_mano_vid_path, allow_pickle=True)
| 85 | +     self.smplx_mano_vid = np.concatenate([
| 86 | +         self.smplx_mano_vid_dict["left_hand"], self.smplx_mano_vid_dict["right_hand"]
| 87 | +     ])
| 88 |       self.smplx_flame_vid = np.load(self.smplx_flame_vid_path, allow_pickle=True)
| 89 |       self.smplx_front_flame_vid = self.smplx_flame_vid[np.load(self.front_flame_path)]
| 90 |
| 108 |
| 109 |       self.model_dir = osp.join(self.current_dir, "models")
| 110 |
| 111 | +     self.ghum_smpl_pairs = torch.tensor([(0, 24), (2, 26), (5, 25), (7, 28), (8, 27), (11, 16),
| 112 | +                                          (12, 17), (13, 18), (14, 19), (15, 20), (16, 21),
| 113 | +                                          (17, 39), (18, 44), (19, 36), (20, 41), (21, 35),
| 114 | +                                          (22, 40), (23, 1), (24, 2), (25, 4), (26, 5), (27, 7),
| 115 | +                                          (28, 8), (29, 31), (30, 34), (31, 29),
| 116 | +                                          (32, 32)]).long()
| 117 |
| 118 |       # smpl-smplx correspondence
| 119 |       self.smpl_joint_ids_24 = np.arange(22).tolist() + [68, 73]
| 120 |       self.smpl_joint_ids_24_pixie = np.arange(22).tolist() + [61 + 68, 72 + 68]
| 121 |       self.smpl_joint_ids_45 = np.arange(22).tolist() + [68, 73] + np.arange(55, 76).tolist()
| 122 |
| 123 | +     self.extra_joint_ids = np.array([
| 124 | +         61, 72, 66, 69, 58, 68, 57, 56, 64, 59, 67, 75, 70, 65, 60, 61, 63, 62, 76, 71, 72, 74,
| 125 | +         73
| 126 | +     ])
| 127 |
| 128 |       self.extra_joint_ids += 68
| 129 |
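`ghum_smpl_pairs` is a table of (GHUM landmark id, SMPL-X joint id) correspondences, so matched rows from the two joint conventions can be gathered side by side. A small illustrative sketch with stand-in joint tensors; only the first few pairs are reproduced here:

import torch

pairs = torch.tensor([(0, 24), (2, 26), (5, 25), (7, 28)]).long()
ghum = torch.rand(33, 3)      # stand-in GHUM-style landmarks
smplx = torch.rand(127, 3)    # stand-in SMPL-X joints

ghum_sel = ghum[pairs[:, 0]]      # rows in GHUM order
smplx_sel = smplx[pairs[:, 1]]    # matching rows in SMPL-X order
err = (ghum_sel - smplx_sel).norm(dim=1).mean()    # per-joint alignment error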
| 363 |       return loss_all
| 364 |
| 365 |
| 366 | + def remesh_laplacian(mesh, obj_path, face_count=50000):
| 367 |
| 368 | +     mesh = mesh.simplify_quadratic_decimation(face_count)
| 369 |       mesh = trimesh.smoothing.filter_humphrey(
| 370 |           mesh, alpha=0.1, beta=0.5, iterations=10, laplacian_operator=None
| 371 |       )
| 374 |       return mesh
| 375 |
| 376 |
| 377 | + def poisson(mesh, obj_path, depth=10, face_count=50000):
| 378 |
| 379 |       pcd_path = obj_path[:-4] + "_soups.ply"
| 380 |       assert (mesh.vertex_normals.shape[1] == 3)
| 389 |       largest_mesh = keep_largest(trimesh.Trimesh(np.array(mesh.vertices), np.array(mesh.triangles)))
| 390 |       largest_mesh.export(obj_path)
| 391 |
| 392 | +     # mesh decimation for faster rendering
| 393 | +     low_res_mesh = largest_mesh.simplify_quadratic_decimation(face_count)
| 394 | +     return low_res_mesh
| 395 |
| 396 |
| 397 |   # Losses to smooth / regularize the mesh shape
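`remesh_laplacian` pairs quadric decimation with Humphrey smoothing, and the same two calls work on any trimesh. A standalone sketch on a synthetic sphere (note that newer trimesh releases rename the method to `simplify_quadric_decimation`; the spelling below matches this repo):

import trimesh

mesh = trimesh.creation.icosphere(subdivisions=4)    # ~5k-face test mesh
mesh = mesh.simplify_quadratic_decimation(1000)      # cap the face budget
mesh = trimesh.smoothing.filter_humphrey(
    mesh, alpha=0.1, beta=0.5, iterations=10, laplacian_operator=None
)
print(len(mesh.faces))    # close to the requested face count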
lib/net/BasePIFuNet.py
CHANGED
@@ -14,8 +14,8 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-import torch.nn as nn
 import pytorch_lightning as pl
+import torch.nn as nn

 from .geometry import index, orthogonal, perspective
lib/net/Discriminator.py
CHANGED
@@ -1,11 +1,16 @@
 """ The code is based on https://github.com/apple/ml-gsn/ with adaption. """

 import math
+
 import torch
 import torch.nn as nn
-import math
 import torch.nn.functional as F
-from lib.torch_utils.ops.native_ops import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
+
+from lib.torch_utils.ops.native_ops import (
+    FusedLeakyReLU,
+    fused_leaky_relu,
+    upfirdn2d,
+)


 class DiscriminatorHead(nn.Module):
lib/net/FBNet.py
CHANGED
@@ -19,13 +19,14 @@ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 """
-import torch
-import torch.nn as nn
 import functools
+
 import numpy as np
 import pytorch_lightning as pl
-from torchvision import models
+import torch
+import torch.nn as nn
 import torch.nn.functional as F
+from torchvision import models


 ###############################################################################
@@ -313,34 +314,28 @@ class NLayerDiscriminator(nn.Module):

         kw = 4
         padw = int(np.ceil((kw - 1.0) / 2))
-        sequence = [
-            [
-                nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
-                nn.LeakyReLU(0.2, True)
-            ]
-        ]
+        sequence = [[
+            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
+            nn.LeakyReLU(0.2, True)
+        ]]

         nf = ndf
         for n in range(1, n_layers):
             nf_prev = nf
             nf = min(nf * 2, 512)
-            sequence += [
-                [
-                    nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
-                    norm_layer(nf),
-                    nn.LeakyReLU(0.2, True)
-                ]
-            ]
+            sequence += [[
+                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
+                norm_layer(nf),
+                nn.LeakyReLU(0.2, True)
+            ]]

         nf_prev = nf
         nf = min(nf * 2, 512)
-        sequence += [
-            [
-                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
-                norm_layer(nf),
-                nn.LeakyReLU(0.2, True)
-            ]
-        ]
+        sequence += [[
+            nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
+            norm_layer(nf),
+            nn.LeakyReLU(0.2, True)
+        ]]

         sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

@@ -632,18 +627,16 @@ class GANLoss(pl.LightningModule):
     def get_target_tensor(self, input, target_is_real):
         target_tensor = None
         if target_is_real:
-            create_label = (
-                (self.real_label_var is None) or (self.real_label_var.numel() != input.numel())
-            )
+            create_label = ((self.real_label_var is None) or
+                            (self.real_label_var.numel() != input.numel()))
            if create_label:
                real_tensor = self.tensor(input.size()).fill_(self.real_label)
                self.real_label_var = real_tensor
                self.real_label_var.requires_grad = False
            target_tensor = self.real_label_var
         else:
-            create_label = (
-                (self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel())
-            )
+            create_label = ((self.fake_label_var is None) or
+                            (self.fake_label_var.numel() != input.numel()))
            if create_label:
                fake_tensor = self.tensor(input.size()).fill_(self.fake_label)
                self.fake_label_var = fake_tensor
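The `sequence` lists reformatted above are the usual pix2pixHD PatchGAN recipe: each inner list is one stride-2 conv stage whose width doubles up to a cap of 512. A sketch of how such groups flatten into a runnable module, with illustrative sizes; the real class also keeps per-stage outputs for feature-matching losses:

import numpy as np
import torch
import torch.nn as nn

input_nc, ndf, n_layers = 3, 64, 3    # illustrative defaults
norm_layer = nn.InstanceNorm2d
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))

sequence = [[
    nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
    nn.LeakyReLU(0.2, True)
]]
nf = ndf
for n in range(1, n_layers):
    nf_prev, nf = nf, min(nf * 2, 512)
    sequence += [[
        nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
        norm_layer(nf),
        nn.LeakyReLU(0.2, True)
    ]]
patchgan = nn.Sequential(*[layer for block in sequence for layer in block])
print(patchgan(torch.rand(1, 3, 256, 256)).shape)    # 1 x 256 x 32 x 32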
lib/net/GANLoss.py
CHANGED
@@ -2,8 +2,9 @@

 import torch
 import torch.nn as nn
-from torch import autograd
 import torch.nn.functional as F
+from torch import autograd
+
 from lib.net.Discriminator import StyleDiscriminator


lib/net/IFGeoNet.py
CHANGED
@@ -1,7 +1,9 @@
 from pickle import TRUE
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+
 from lib.net.geometry import orthogonal


@@ -151,13 +153,11 @@ class IFGeoNet(nn.Module):

         # here every channel corresponse to one feature.

-        features = torch.cat(
-            (
-                feature_0_partial, feature_1_fused, feature_2, feature_3, feature_4,
-                feature_5, feature_6
-            ),
-            dim=1
-        )  # (B, features, 1,7,sample_num)
+        features = torch.cat((
+            feature_0_partial, feature_1_fused, feature_2, feature_3, feature_4, feature_5,
+            feature_6
+        ),
+                             dim=1)  # (B, features, 1,7,sample_num)
         shape = features.shape
         features = torch.reshape(
             features, (shape[0], shape[1] * shape[3], shape[4])
lib/net/IFGeoNet_nobody.py
CHANGED
@@ -1,7 +1,9 @@
 from pickle import TRUE
+
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+
 from lib.net.geometry import orthogonal


@@ -136,13 +138,11 @@ class IFGeoNet(nn.Module):

         # here every channel corresponse to one feature.

-        features = torch.cat(
-            (
-                feature_0_partial, feature_1_fused, feature_2, feature_3, feature_4,
-                feature_5, feature_6
-            ),
-            dim=1
-        )  # (B, features, 1,7,sample_num)
+        features = torch.cat((
+            feature_0_partial, feature_1_fused, feature_2, feature_3, feature_4, feature_5,
+            feature_6
+        ),
+                             dim=1)  # (B, features, 1,7,sample_num)
         shape = features.shape
         features = torch.reshape(
             features, (shape[0], shape[1] * shape[3], shape[4])
lib/net/NormalNet.py
CHANGED
@@ -14,14 +14,14 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-from lib.net.FBNet import define_G, define_D, VGGLoss, GANLoss, IDMRFLoss
-from lib.net.net_util import init_net
-from lib.net.BasePIFuNet import BasePIFuNet
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

+from lib.net.BasePIFuNet import BasePIFuNet
+from lib.net.FBNet import GANLoss, IDMRFLoss, VGGLoss, define_D, define_G
+from lib.net.net_util import init_net
+

 class NormalNet(BasePIFuNet):
     """
@@ -63,12 +63,12 @@ class NormalNet(BasePIFuNet):
         self.in_nmlB = [
             item[0] for item in self.opt.in_nml if "_B" in item[0] or item[0] == "image"
         ]
-        self.in_nmlF_dim = sum(
-            [item[1] for item in self.opt.in_nml if "_F" in item[0] or item[0] == "image"]
-        )
-        self.in_nmlB_dim = sum(
-            [item[1] for item in self.opt.in_nml if "_B" in item[0] or item[0] == "image"]
-        )
+        self.in_nmlF_dim = sum([
+            item[1] for item in self.opt.in_nml if "_F" in item[0] or item[0] == "image"
+        ])
+        self.in_nmlB_dim = sum([
+            item[1] for item in self.opt.in_nml if "_B" in item[0] or item[0] == "image"
+        ])

         self.netF = define_G(self.in_nmlF_dim, 3, 64, "global", 4, 9, 1, 3, "instance")
         self.netB = define_G(self.in_nmlB_dim, 3, 64, "global", 4, 9, 1, 3, "instance")
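`opt.in_nml` is a list of (name, channels) pairs, so the sums reformatted above just count the channels routed to the front ("_F") and back ("_B") normal networks, with the RGB image feeding both. A toy check with illustrative pairs:

in_nml = [("image", 3), ("normal_F", 3), ("normal_B", 3)]    # illustrative config
in_nmlF_dim = sum([item[1] for item in in_nml if "_F" in item[0] or item[0] == "image"])
in_nmlB_dim = sum([item[1] for item in in_nml if "_B" in item[0] or item[0] == "image"])
print(in_nmlF_dim, in_nmlB_dim)    # 6 6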
lib/net/geometry.py
CHANGED
@@ -14,11 +14,13 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-import torch
-import numpy as np
 import numbers
-from torch.nn import functional as F
+
+import numpy as np
+import torch
 from einops.einops import rearrange
+from torch.nn import functional as F
+
 """
 Useful geometric operations, e.g. Perspective projection and a differentiable Rodrigues formula
 Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR
@@ -42,13 +44,11 @@ def quaternion_to_rotation_matrix(quat):
     wx, wy, wz = w * x, w * y, w * z
     xy, xz, yz = x * y, x * z, y * z

-    rotMat = torch.stack(
-        [
-            w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy,
-            w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2
-        ],
-        dim=1
-    ).view(B, 3, 3)
+    rotMat = torch.stack([
+        w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2,
+        2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2
+    ],
+                        dim=1).view(B, 3, 3)
     return rotMat


@@ -508,12 +508,10 @@ def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000, img_si
     weight2 = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)

     # least squares
-    Q = np.array(
-        [
-            F * np.tile(np.array([1, 0]), num_joints), F * np.tile(np.array([0, 1]), num_joints),
-            O - np.reshape(joints_2d, -1)
-        ]
-    ).T
+    Q = np.array([
+        F * np.tile(np.array([1, 0]), num_joints), F * np.tile(np.array([0, 1]), num_joints),
+        O - np.reshape(joints_2d, -1)
+    ]).T
     c = (np.reshape(joints_2d, -1) - O) * Z - F * XY

     # weighted least squares
@@ -580,13 +578,11 @@ def Rot_y(angle, category="torch", prepend_dim=True, device=None):
     prepend_dim: prepend an extra dimension
     Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)
     """
-    m = np.array(
-        [
-            [np.cos(angle), 0.0, np.sin(angle)],
-            [0.0, 1.0, 0.0],
-            [-np.sin(angle), 0.0, np.cos(angle)],
-        ]
-    )
+    m = np.array([
+        [np.cos(angle), 0.0, np.sin(angle)],
+        [0.0, 1.0, 0.0],
+        [-np.sin(angle), 0.0, np.cos(angle)],
+    ])
     if category == "torch":
         if prepend_dim:
             return torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0)
@@ -608,13 +604,11 @@ def Rot_x(angle, category="torch", prepend_dim=True, device=None):
     prepend_dim: prepend an extra dimension
     Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)
     """
-    m = np.array(
-        [
-            [1.0, 0.0, 0.0],
-            [0.0, np.cos(angle), -np.sin(angle)],
-            [0.0, np.sin(angle), np.cos(angle)],
-        ]
-    )
+    m = np.array([
+        [1.0, 0.0, 0.0],
+        [0.0, np.cos(angle), -np.sin(angle)],
+        [0.0, np.sin(angle), np.cos(angle)],
+    ])
     if category == "torch":
         if prepend_dim:
             return torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0)
@@ -636,13 +630,11 @@ def Rot_z(angle, category="torch", prepend_dim=True, device=None):
     prepend_dim: prepend an extra dimension
     Return: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)
     """
-    m = np.array(
-        [
-            [np.cos(angle), -np.sin(angle), 0.0],
-            [np.sin(angle), np.cos(angle), 0.0],
-            [0.0, 0.0, 1.0],
-        ]
-    )
+    m = np.array([
+        [np.cos(angle), -np.sin(angle), 0.0],
+        [np.sin(angle), np.cos(angle), 0.0],
+        [0.0, 0.0, 1.0],
+    ])
     if category == "torch":
         if prepend_dim:
             return torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0)
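As a sanity check on the `rotMat` stack order above: for a unit quaternion (w, x, y, z), the nine stacked terms are the row-major entries of the standard rotation matrix. A standalone NumPy version for a 90-degree turn about z (the repo's torch code is the batched equivalent):

import numpy as np

w, x, y, z = np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)
R = np.array([
    [w*w + x*x - y*y - z*z, 2*x*y - 2*w*z, 2*w*y + 2*x*z],
    [2*w*z + 2*x*y, w*w - x*x + y*y - z*z, 2*y*z - 2*w*x],
    [2*x*z - 2*w*y, 2*w*x + 2*y*z, w*w - x*x - y*y + z*z],
])
print(R @ np.array([1.0, 0.0, 0.0]))    # ~[0, 1, 0]: x-hat maps to y-hat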
lib/net/net_util.py
CHANGED
@@ -14,12 +14,13 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

+import functools
+
 import torch
-from torch.nn import init
 import torch.nn as nn
 import torch.nn.functional as F
-import functools
 from torch.autograd import grad
+from torch.nn import init


 def gradient(inputs, outputs):
lib/net/voxelize.py
CHANGED
@@ -1,11 +1,11 @@
 from __future__ import division, print_function
+
+import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import numpy as np
-from torch.autograd import Function
-
 import voxelize_cuda
+from torch.autograd import Function


 class VoxelizationFunction(Function):
|
lib/pixielib/models/FLAME.py
CHANGED
|
@@ -13,10 +13,11 @@
|
|
| 13 |
# For comments or questions, please email us at pixie@tue.mpg.de
|
| 14 |
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
|
| 15 |
|
|
|
|
|
|
|
|
|
|
| 16 |
import torch
|
| 17 |
import torch.nn as nn
|
| 18 |
-
import numpy as np
|
| 19 |
-
import pickle
|
| 20 |
import torch.nn.functional as F
|
| 21 |
|
| 22 |
|
|
|
|
| 13 |
# For comments or questions, please email us at pixie@tue.mpg.de
|
| 14 |
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
|
| 15 |
|
| 16 |
+
import pickle
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
import torch
|
| 20 |
import torch.nn as nn
|
|
|
|
|
|
|
| 21 |
import torch.nn.functional as F
|
| 22 |
|
| 23 |
|
lib/pixielib/models/SMPLX.py
CHANGED
@@ -3,19 +3,20 @@ original from https://github.com/vchoutas/smplx
 modified by Vassilis and Yao
 """

+import pickle
+
+import numpy as np
 import torch
 import torch.nn as nn
-import numpy as np
-import pickle

 from .lbs import (
+    JointsFromVerticesSelector,
     Struct,
-    to_tensor,
-    to_np,
+    find_dynamic_lmk_idx_and_bcoords,
     lbs,
+    to_np,
+    to_tensor,
     vertices2landmarks,
-    JointsFromVerticesSelector,
-    find_dynamic_lmk_idx_and_bcoords,
 )

 # SMPLX
@@ -209,468 +210,452 @@ extra_names = [
 SMPLX_names += extra_names

 part_indices = {}
+part_indices["body"] = np.array([
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+    123, 124, 125, 126, 127, 132, 134, 135, 136, 137, 138, 143,
+])
+part_indices["torso"] = np.array([
+    0, 1, 2, 3, 6, 9, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 55, 56, 57, 58, 59, 76, 77,
+    78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+    101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+    119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+    137, 138, 139, 140, 141, 142, 143, 144,
+])
+part_indices["head"] = np.array([
+    12, 15, 22, 23, 24, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+    73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+    96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+    115, 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 134, 136, 137,
+])
+part_indices["face"] = np.array([
+    55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+    78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+    101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+    119, 120, 121, 122,
+])
+part_indices["upper"] = np.array([
+    12, 13, 14, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+    75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+    98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
+    117, 118, 119, 120, 121, 122,
+])
+part_indices["hand"] = np.array([
+    20, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+    46, 47, 48, 49, 50, 51, 52, 53, 54, 128, 129, 130, 131, 133, 139, 140, 141, 142, 144,
+])
+part_indices["left_hand"] = np.array([
+    20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 128, 129, 130, 131, 133,
+])
+part_indices["right_hand"] = np.array([
+    21, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 139, 140, 141, 142, 144,
+])
 # kinematic tree
 head_kin_chain = [15, 12, 9, 6, 3, 0]

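`part_indices` maps part names to SMPL-X joint ids, which makes per-part joint selection a single fancy-index. An illustrative selection with a stand-in joint array:

import numpy as np

joints = np.random.rand(145, 3)                  # stand-in SMPL-X joint set
left_hand = joints[part_indices["left_hand"]]    # (21, 3) subset for a hand loss
print(left_hand.shape)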
lib/pixielib/models/encoders.py
CHANGED
@@ -1,6 +1,6 @@
 import numpy as np
-import torch.nn as nn
 import torch
+import torch.nn as nn
 import torch.nn.functional as F

