Commit 32ccac0 by xinjie.wang
Parent(s): 3b7b0b9
app.py CHANGED
@@ -21,20 +21,18 @@ os.environ["GRADIO_APP"] = "textto3d"
 
 
 import gradio as gr
+from app_style import custom_theme, image_css, lighting_css
 from common import (
     MAX_SEED,
     VERSION,
     active_btn_by_text_content,
-    custom_theme,
     end_session,
     extract_3d_representations_v2,
     extract_urdf,
     get_cached_image,
     get_seed,
     get_selected_image,
-    image_css,
     image_to_3d,
-    lighting_css,
     start_session,
     text2image_fn,
 )
app_style.py ADDED
@@ -0,0 +1,27 @@
+from gradio.themes import Soft
+from gradio.themes.utils.colors import gray, neutral, slate, stone, teal, zinc
+
+lighting_css = """
+<style>
+#lighter_mesh canvas {
+    filter: brightness(1.9) !important;
+}
+</style>
+"""
+
+image_css = """
+<style>
+.image_fit .image-frame {
+    object-fit: contain !important;
+    height: 100% !important;
+}
+</style>
+"""
+
+custom_theme = Soft(
+    primary_hue=stone,
+    secondary_hue=gray,
+    radius_size="md",
+    text_size="sm",
+    spacing_size="sm",
+)
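Note: a minimal usage sketch (assumed, not part of this commit) of how the extracted app_style module would be consumed from one of the Gradio apps; the Blocks layout and the gr.HTML injection points are hypothetical:

    import gradio as gr

    from app_style import custom_theme, image_css, lighting_css

    with gr.Blocks(theme=custom_theme) as demo:
        gr.HTML(lighting_css)  # injects the #lighter_mesh canvas brightness style
        gr.HTML(image_css)     # injects the .image_fit object-fit style
        ...

    demo.launch()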
common.py CHANGED
@@ -30,8 +30,6 @@ import torch
 import torch.nn.functional as F
 import trimesh
 from easydict import EasyDict as edict
-from gradio.themes import Soft
-from gradio.themes.utils.colors import gray, neutral, slate, stone, teal, zinc
 from PIL import Image
 from embodied_gen.data.backproject_v2 import entrypoint as backproject_api
 from embodied_gen.data.differentiable_render import entrypoint as render_api
@@ -151,6 +149,7 @@ if os.getenv("GRADIO_APP") == "imageto3d":
     TMP_DIR = os.path.join(
         os.path.dirname(os.path.abspath(__file__)), "sessions/imageto3d"
     )
+    os.makedirs(TMP_DIR, exist_ok=True)
 elif os.getenv("GRADIO_APP") == "textto3d":
     RBG_REMOVER = RembgRemover()
     RBG14_REMOVER = BMGG14Remover()
@@ -168,6 +167,7 @@ elif os.getenv("GRADIO_APP") == "textto3d":
     TMP_DIR = os.path.join(
         os.path.dirname(os.path.abspath(__file__)), "sessions/textto3d"
     )
+    os.makedirs(TMP_DIR, exist_ok=True)
 elif os.getenv("GRADIO_APP") == "texture_edit":
     PIPELINE_IP = build_texture_gen_pipe(
         base_ckpt_dir="./weights",
@@ -182,34 +182,7 @@ elif os.getenv("GRADIO_APP") == "texture_edit":
     TMP_DIR = os.path.join(
         os.path.dirname(os.path.abspath(__file__)), "sessions/texture_edit"
     )
-
-os.makedirs(TMP_DIR, exist_ok=True)
-
-
-lighting_css = """
-<style>
-#lighter_mesh canvas {
-    filter: brightness(1.9) !important;
-}
-</style>
-"""
-
-image_css = """
-<style>
-.image_fit .image-frame {
-    object-fit: contain !important;
-    height: 100% !important;
-}
-</style>
-"""
-
-custom_theme = Soft(
-    primary_hue=stone,
-    secondary_hue=gray,
-    radius_size="md",
-    text_size="sm",
-    spacing_size="sm",
-)
+    os.makedirs(TMP_DIR, exist_ok=True)
 
 
 def start_session(req: gr.Request) -> None:
embodied_gen/data/asset_converter.py CHANGED
@@ -5,6 +5,7 @@ import os
 import xml.etree.ElementTree as ET
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
+from glob import glob
 from shutil import copy
 
 import trimesh
@@ -145,18 +146,20 @@ class MeshtoMJCFConverter(AssetConverterBase):
             texture=f"texture_{name}",
             reflectance=str(reflectance),
         )
-        ET.SubElement(
-            mujoco_element,
-            "texture",
-            name=f"texture_{name}",
-            type="2d",
-            file=f"{dirname}/material_0.png",
-        )
 
-        self._copy_asset_file(
-            f"{input_dir}/{dirname}/material_0.png",
-            f"{output_dir}/{dirname}/material_0.png",
-        )
+        for path in glob(f"{input_dir}/{dirname}/*.png"):
+            file_name = os.path.basename(path)
+            self._copy_asset_file(
+                path,
+                f"{output_dir}/{dirname}/{file_name}",
+            )
+            ET.SubElement(
+                mujoco_element,
+                "texture",
+                name=f"texture_{name}_{os.path.splitext(file_name)[0]}",
+                type="2d",
+                file=f"{dirname}/{file_name}",
+            )
 
         return material
 
@@ -213,6 +216,163 @@
         logger.info(f"Successfully converted {urdf_path} → {mjcf_path}")
 
 
+class URDFtoMJCFConverter(MeshtoMJCFConverter):
+    """Convert URDF files with joints to MJCF format, handling transformations from joints."""
+
+    def add_materials(
+        self,
+        mujoco_element: ET.Element,
+        link: ET.Element,
+        tag: str,
+        input_dir: str,
+        output_dir: str,
+        name: str,
+        reflectance: float = 0.2,
+    ) -> ET.Element:
+        """Add materials to the MJCF asset from the URDF link."""
+        element = link.find(tag)
+        geometry = element.find("geometry")
+        mesh = geometry.find("mesh")
+        filename = mesh.get("filename")
+        dirname = os.path.dirname(filename)
+
+        diffuse_texture = None
+        for path in glob(f"{input_dir}/{dirname}/*.png"):
+            file_name = os.path.basename(path)
+            self._copy_asset_file(
+                path,
+                f"{output_dir}/{dirname}/{file_name}",
+            )
+            texture_name = f"texture_{name}_{os.path.splitext(file_name)[0]}"
+            ET.SubElement(
+                mujoco_element,
+                "texture",
+                name=texture_name,
+                type="2d",
+                file=f"{dirname}/{file_name}",
+            )
+            if "diffuse" in file_name.lower():
+                diffuse_texture = texture_name
+
+        if diffuse_texture is None:
+            return None
+
+        material = ET.SubElement(
+            mujoco_element,
+            "material",
+            name=f"material_{name}",
+            texture=diffuse_texture,
+            reflectance=str(reflectance),
+        )
+
+        return material
+
+    def convert(self, urdf_path: str, mjcf_path: str, **kwargs) -> str:
+        """Convert a URDF file with joints to MJCF format."""
+        tree = ET.parse(urdf_path)
+        root = tree.getroot()
+
+        mujoco_struct = ET.Element("mujoco")
+        mujoco_struct.set("model", root.get("name"))
+        mujoco_asset = ET.SubElement(mujoco_struct, "asset")
+        mujoco_worldbody = ET.SubElement(mujoco_struct, "worldbody")
+
+        input_dir = os.path.dirname(urdf_path)
+        output_dir = os.path.dirname(mjcf_path)
+        os.makedirs(output_dir, exist_ok=True)
+
+        # Create a dictionary to store body elements for each link
+        body_dict = {}
+
+        # Process all links first
+        for idx, link in enumerate(root.findall("link")):
+            link_name = link.get("name", f"unnamed_link_{idx}")
+            body = ET.SubElement(mujoco_worldbody, "body", name=link_name)
+            body_dict[link_name] = body
+
+            # Add materials and geometry
+            visual_element = link.find("visual")
+            if visual_element is not None:
+                material = self.add_materials(
+                    mujoco_asset,
+                    link,
+                    "visual",
+                    input_dir,
+                    output_dir,
+                    name=str(idx),
+                )
+                self.add_geometry(
+                    mujoco_asset,
+                    link,
+                    body,
+                    "visual",
+                    input_dir,
+                    output_dir,
+                    f"visual_mesh_{idx}",
+                    material,
+                )
+
+            collision_element = link.find("collision")
+            if collision_element is not None:
+                self.add_geometry(
+                    mujoco_asset,
+                    link,
+                    body,
+                    "collision",
+                    input_dir,
+                    output_dir,
+                    f"collision_mesh_{idx}",
+                    is_collision=True,
+                )
+
+        # Process joints to set transformations and hierarchy
+        for joint in root.findall("joint"):
+            joint_type = joint.get("type")
+            if joint_type != "fixed":
+                logger.warning(
+                    f"Skipping non-fixed joint: {joint.get('name')}"
+                )
+                continue
+
+            parent_link = joint.find("parent").get("link")
+            child_link = joint.find("child").get("link")
+            origin = joint.find("origin")
+
+            if parent_link not in body_dict or child_link not in body_dict:
+                logger.warning(
+                    f"Parent or child link not found for joint: {joint.get('name')}"
+                )
+                continue
+
+            # Move child body under parent body in MJCF hierarchy
+            child_body = body_dict[child_link]
+            mujoco_worldbody.remove(child_body)
+            parent_body = body_dict[parent_link]
+            parent_body.append(child_body)
+
+            # Apply joint origin transformation to child body
+            if origin is not None:
+                xyz = origin.get("xyz", "0 0 0")
+                rpy = origin.get("rpy", "0 0 0")
+                child_body.set("pos", xyz)
+                # Convert rpy to MJCF euler format (degrees)
+                rpy_floats = list(map(float, rpy.split()))
+                rotation = Rotation.from_euler(
+                    "xyz", rpy_floats, degrees=False
+                )
+                euler_deg = rotation.as_euler("xyz", degrees=True)
+                child_body.set(
+                    "euler", f"{euler_deg[0]} {euler_deg[1]} {euler_deg[2]}"
+                )
+
+        tree = ET.ElementTree(mujoco_struct)
+        ET.indent(tree, space=" ", level=0)
+        tree.write(mjcf_path, encoding="utf-8", xml_declaration=True)
+        logger.info(f"Successfully converted {urdf_path} → {mjcf_path}")
+
+        return mjcf_path
+
+
 class MeshtoUSDConverter(AssetConverterBase):
     """Convert Mesh file from URDF into USD format."""
 
@@ -455,34 +615,34 @@ class AssetConverterFactory:
 
 
 if __name__ == "__main__":
-    # target_asset_type = AssetType.MJCF
-    target_asset_type = AssetType.USD
-
-    urdf_paths = [
-        "outputs/embodiedgen_assets/demo_assets/remote_control/result/remote_control.urdf",
-    ]
+    # # target_asset_type = AssetType.MJCF
+    # target_asset_type = AssetType.USD
+
+    # urdf_paths = [
+    #     "outputs/embodiedgen_assets/demo_assets/remote_control/result/remote_control.urdf",
+    # ]
+
+    # if target_asset_type == AssetType.MJCF:
+    #     output_files = [
+    #         "outputs/embodiedgen_assets/demo_assets/remote_control/mjcf/remote_control.mjcf",
+    #     ]
+    #     asset_converter = AssetConverterFactory.create(
+    #         target_type=AssetType.MJCF,
+    #         source_type=AssetType.URDF,
+    #     )
+
+    # elif target_asset_type == AssetType.USD:
+    #     output_files = [
+    #         "outputs/embodiedgen_assets/demo_assets/remote_control/usd/remote_control.usd",
+    #     ]
+    #     asset_converter = AssetConverterFactory.create(
+    #         target_type=AssetType.USD,
+    #         source_type=AssetType.MESH,
+    #     )
 
-    if target_asset_type == AssetType.MJCF:
-        output_files = [
-            "outputs/embodiedgen_assets/demo_assets/remote_control/mjcf/remote_control.mjcf",
-        ]
-        asset_converter = AssetConverterFactory.create(
-            target_type=AssetType.MJCF,
-            source_type=AssetType.URDF,
-        )
-
-    elif target_asset_type == AssetType.USD:
-        output_files = [
-            "outputs/embodiedgen_assets/demo_assets/remote_control/usd/remote_control.usd",
-        ]
-        asset_converter = AssetConverterFactory.create(
-            target_type=AssetType.USD,
-            source_type=AssetType.MESH,
-        )
-
-    with asset_converter:
-        for urdf_path, output_file in zip(urdf_paths, output_files):
-            asset_converter.convert(urdf_path, output_file)
+    # with asset_converter:
+    #     for urdf_path, output_file in zip(urdf_paths, output_files):
+    #         asset_converter.convert(urdf_path, output_file)
 
     # urdf_path = "outputs/embodiedgen_assets/demo_assets/remote_control/result/remote_control.urdf"
     # output_file = "outputs/embodiedgen_assets/demo_assets/remote_control/usd/remote_control.usd"
@@ -495,3 +655,9 @@ if __name__ == "__main__":
 
     # with asset_converter:
     #     asset_converter.convert(urdf_path, output_file)
+
+    urdf_path = "/home/users/xinjie.wang/xinjie/infinigen/outputs/exports/kitchen_simple_solve_nos_i_urdf/export_scene/scene.urdf"
+    output_file = "/home/users/xinjie.wang/xinjie/infinigen/outputs/exports/kitchen_simple_solve_nos_i_urdf/mjcf/scene.urdf"
+    asset_converter = URDFtoMJCFConverter()
+    with asset_converter:
+        asset_converter.convert(urdf_path, output_file)
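Note: a small worked check of the rpy-to-euler conversion performed in URDFtoMJCFConverter.convert, assuming `Rotation` is scipy.spatial.transform.Rotation (consistent with the Rotation.from_euler call in the diff): URDF origins store roll-pitch-yaw in radians, while the MJCF `euler` attribute is written here in degrees.

    from scipy.spatial.transform import Rotation

    rpy_floats = [1.5708, 0.0, 0.0]  # 90° roll, as a URDF <origin rpy=...> would store it
    rotation = Rotation.from_euler("xyz", rpy_floats, degrees=False)
    euler_deg = rotation.as_euler("xyz", degrees=True)
    print(euler_deg)  # ≈ [90. 0. 0.]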
embodied_gen/data/convex_decomposer.py CHANGED
@@ -27,7 +27,7 @@ logger = logging.getLogger(__name__)
 __all__ = [
     "decompose_convex_coacd",
     "decompose_convex_mesh",
-    "decompose_convex_process",
+    "decompose_convex_mp",
 ]
 
 
@@ -37,6 +37,7 @@ def decompose_convex_coacd(
     params: dict,
     verbose: bool = False,
     auto_scale: bool = True,
+    scale_factor: float = 1.0,
 ) -> None:
     coacd.set_log_level("info" if verbose else "warn")
 
@@ -44,14 +45,22 @@
     mesh = coacd.Mesh(mesh.vertices, mesh.faces)
 
     result = coacd.run_coacd(mesh, **params)
-    combined = sum([trimesh.Trimesh(*m) for m in result])
+
+    meshes = []
+    for v, f in result:
+        meshes.append(trimesh.Trimesh(v, f))
 
     # Compute collision_scale because convex decomposition usually makes the mesh larger.
     if auto_scale:
-        convex_mesh_shape = np.ptp(combined.vertices, axis=0)
+        all_mesh = sum([trimesh.Trimesh(*m) for m in result])
+        convex_mesh_shape = np.ptp(all_mesh.vertices, axis=0)
         visual_mesh_shape = np.ptp(mesh.vertices, axis=0)
-        rescale = visual_mesh_shape / convex_mesh_shape
-        combined.vertices *= rescale
+        scale_factor *= visual_mesh_shape / convex_mesh_shape
+
+    combined = trimesh.Scene()
+    for mesh_part in meshes:
+        mesh_part.vertices *= scale_factor
+        combined.add_geometry(mesh_part)
 
     combined.export(outfile)
 
@@ -71,6 +80,7 @@ def decompose_convex_mesh(
     merge: bool = True,
     seed: int = 0,
     auto_scale: bool = True,
+    scale_factor: float = 1.005,
     verbose: bool = False,
 ) -> str:
     """Decompose a mesh into convex parts using the CoACD algorithm."""
@@ -95,7 +105,9 @@
     )
 
     try:
-        decompose_convex_coacd(filename, outfile, params, verbose, auto_scale)
+        decompose_convex_coacd(
+            filename, outfile, params, verbose, auto_scale, scale_factor
+        )
        if os.path.exists(outfile):
            return outfile
    except Exception as e:
@@ -106,7 +118,7 @@
    try:
        params["preprocess_mode"] = "on"
        decompose_convex_coacd(
-            filename, outfile, params, verbose, auto_scale
+            filename, outfile, params, verbose, auto_scale, scale_factor
        )
        if os.path.exists(outfile):
            return outfile
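Note: a minimal, assumed usage sketch of the updated API (the paths are hypothetical). auto_scale rescales the decomposition back to the visual-mesh extents; the new scale_factor is an extra multiplier applied on top of that, so parts are exported as a trimesh.Scene of separately scaled convex pieces rather than one fused mesh:

    from embodied_gen.data.convex_decomposer import decompose_convex_mesh

    out_path = decompose_convex_mesh(
        "outputs/mesh.obj",            # hypothetical input mesh
        "outputs/mesh_collision.obj",  # hypothetical output file
        auto_scale=True,               # match the visual mesh extents
        scale_factor=1.005,            # slight extra inflation (new default)
    )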
embodied_gen/data/mesh_operator.py CHANGED
@@ -403,6 +403,7 @@ class MeshFixer(object):
         )
         mesh.clean(inplace=True)
         mesh.clear_data()
+        mesh = mesh.triangulate()
         mesh = mesh.decimate(ratio, progress_bar=True)
 
         # Update vertices and faces
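Note: the triangulate() call is likely needed because PyVista's decimate wraps VTK quadric decimation, which expects an all-triangle mesh; triangulating first makes the pipeline safe for inputs containing quads or polygons. A standalone sketch of the pattern (assuming PyVista, consistent with the clean/decimate calls above):

    import pyvista as pv

    mesh = pv.Plane(i_resolution=10, j_resolution=10)  # quad cells, not triangles
    mesh = mesh.triangulate()                          # convert to triangles first
    mesh = mesh.decimate(0.5)                          # now decimation is well-defined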
embodied_gen/envs/pick_embodiedgen.py CHANGED
@@ -74,7 +74,9 @@ class PickEmbodiedGen(BaseEnv):
         layout_file = kwargs.pop("layout_file", None)
         replace_objs = kwargs.pop("replace_objs", True)
         self.enable_grasp = kwargs.pop("enable_grasp", False)
-        self.init_quat = kwargs.pop("init_quat", [0.7071, 0, 0, 0.7071])
+        self.init_3dgs_quat = kwargs.pop(
+            "init_3dgs_quat", [0.7071, 0, 0, 0.7071]
+        )
         # Add small offset in z-axis to avoid collision.
         self.objs_z_offset = kwargs.pop("objs_z_offset", 0.002)
         self.robot_z_offset = kwargs.pop("robot_z_offset", 0.002)
@@ -107,7 +109,7 @@
         self.bg_images = dict()
         if self.render_mode == "hybrid":
             self.bg_images = self.render_gs3d_images(
-                self.layouts, num_envs, self.init_quat
+                self.layouts, num_envs, self.init_3dgs_quat
             )
 
     @staticmethod
embodied_gen/models/layout.py CHANGED
@@ -77,10 +77,11 @@ LAYOUT_DISASSEMBLE_PROMPT = f"""
     - {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} must be common
     household or office items or furniture, not abstract concepts, not too small like needle.
     - If the input includes a plural or grouped object (e.g., "pens", "bottles", "plates", "fruit"),
-    you must decompose it into multiple individual instances (e.g., ["pen", "pen"], ["apple", "pear"]).
+    you must decompose it into multiple individual instances (e.g., ["pen1", "pen2"], ["apple", "pear"]).
     - Containers that hold objects (e.g., "bowl of apples", "box of tools") must
-    be separated into individual items (e.g., ["bowl", "apple", "apple"]).
+    be separated into individual items (e.g., ["bowl", "apple1", "apple2"]).
     - Do not include transparent objects such as "glass", "plastic", etc.
+    - All {Scene3DItemEnum.MANIPULATED_OBJS} and {Scene3DItemEnum.DISTRACTOR_OBJS} must be child nodes of {Scene3DItemEnum.CONTEXT}.
     - The output must be in compact JSON format and use Markdown syntax, just like the output in the example below.
 
     Examples:
@@ -170,7 +171,7 @@ LAYOUT_DISASSEMBLE_PROMPT = f"""
         "robot": "franka",
         "background": "office",
         "context": "table",
-        "manipulated_objs": ["pen", "pen", "grey bowl"],
+        "manipulated_objs": ["pen1", "pen2", "grey bowl"],
         "distractor_objs": ["notepad", "cup"]
     }}
     ```
embodied_gen/scripts/compose_layout.py CHANGED
@@ -16,6 +16,7 @@
 
 import json
 import os
+import shutil
 from dataclasses import dataclass
 
 import tyro
@@ -51,6 +52,14 @@ def entrypoint(**kwargs):
     out_layout_path = f"{output_dir}/layout.json"
 
     layout_info = bfs_placement(args.layout_path, seed=args.seed)
+    origin_dir = os.path.dirname(args.layout_path)
+    for key in layout_info.assets:
+        src = f"{origin_dir}/{layout_info.assets[key]}"
+        dst = f"{output_dir}/{layout_info.assets[key]}"
+        if src == dst:
+            continue
+        shutil.copytree(src, dst, dirs_exist_ok=True)
+
     with open(out_layout_path, "w") as f:
         json.dump(layout_info.to_dict(), f, indent=4)
 
embodied_gen/scripts/gen_layout.py CHANGED
@@ -115,7 +115,19 @@ def entrypoint() -> None:
     # Background GEN (for efficiency, temp use retrieval instead)
     bg_node = layout_info.relation[Scene3DItemEnum.BACKGROUND.value]
     text = layout_info.objs_desc[bg_node]
-    match_key = SCENE_MATCHER.query(text, str(scene_dict))
+    match_key = SCENE_MATCHER.query(
+        text, str(scene_dict), params=gpt_params
+    )
+    n_max_attempt = 10
+    while match_key not in scene_dict and n_max_attempt > 0:
+        logger.error(
+            f"Cannot find matched scene {match_key}, {n_max_attempt} retries left..."
+        )
+        match_key = SCENE_MATCHER.query(
+            text, str(scene_dict), params=gpt_params
+        )
+        n_max_attempt -= 1
+
     match_scene_path = f"{os.path.dirname(args.bg_list)}/{match_key}"
     bg_save_dir = os.path.join(output_root, "background")
     copytree(match_scene_path, bg_save_dir, dirs_exist_ok=True)
@@ -128,7 +140,6 @@
 
     layout_info = bfs_placement(
         layout_path,
-        limit_reach_range=True if args.insert_robot else False,
         seed=args.seed_layout,
     )
     layout_path = f"{output_root}/layout.json"
embodied_gen/scripts/simulate_sapien.py CHANGED
@@ -49,7 +49,7 @@ class SapienSimConfig:
     sim_freq: int = 200
     sim_step: int = 400
     z_offset: float = 0.004
-    init_quat: list[float] = field(
+    init_3dgs_quat: list[float] = field(
         default_factory=lambda: [0.7071, 0, 0, 0.7071]
     )  # xyzw
     device: str = "cuda"
@@ -137,7 +137,7 @@ def entrypoint(**kwargs):
     gs_path = f"{asset_root}/{layout_data.assets[bg_node]}/gs_model.ply"
     gs_model: GaussianOperator = GaussianOperator.load_from_ply(gs_path)
     x, y, z, qx, qy, qz, qw = layout_data.position[bg_node]
-    qx, qy, qz, qw = quaternion_multiply([qx, qy, qz, qw], cfg.init_quat)
+    qx, qy, qz, qw = quaternion_multiply([qx, qy, qz, qw], cfg.init_3dgs_quat)
     init_pose = torch.tensor([x, y, z, qx, qy, qz, qw])
     gs_model = gs_model.get_gaussians(instance_pose=init_pose)
 
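Note: a quick sanity check (assuming the xyzw ordering stated in the config comment, and scipy's matching convention) that the default init_3dgs_quat is a +90° rotation about the x-axis, presumably re-orienting the 3DGS background (e.g., from y-up to z-up):

    from scipy.spatial.transform import Rotation as R

    print(R.from_quat([0.7071, 0, 0, 0.7071]).as_euler("xyz", degrees=True))
    # ≈ [90. 0. 0.]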
embodied_gen/utils/geometry.py CHANGED
@@ -80,7 +80,7 @@ def pose_to_matrix(pose: list[float]) -> np.ndarray:
 
 
 def compute_xy_bbox(
-    vertices: np.ndarray, col_x: int = 0, col_y: int = 2
+    vertices: np.ndarray, col_x: int = 0, col_y: int = 1
 ) -> list[float]:
     x_vals = vertices[:, col_x]
     y_vals = vertices[:, col_y]
@@ -137,13 +137,16 @@ def with_seed(seed_attr_name: str = "seed"):
 def compute_convex_hull_path(
     vertices: np.ndarray,
     z_threshold: float = 0.05,
-    interp_per_edge: int = 3,
+    interp_per_edge: int = 10,
     margin: float = -0.02,
+    x_axis: int = 0,
+    y_axis: int = 1,
+    z_axis: int = 2,
 ) -> Path:
     top_vertices = vertices[
-        vertices[:, 1] > vertices[:, 1].max() - z_threshold
+        vertices[:, z_axis] > vertices[:, z_axis].max() - z_threshold
     ]
-    top_xy = top_vertices[:, [0, 2]]
+    top_xy = top_vertices[:, [x_axis, y_axis]]
 
     if len(top_xy) < 3:
         raise ValueError("Not enough points to form a convex hull")
@@ -184,11 +187,11 @@ def all_corners_inside(hull: Path, box: list, threshold: int = 3) -> bool:
 def compute_axis_rotation_quat(
     axis: Literal["x", "y", "z"], angle_rad: float
 ) -> list[float]:
-    if axis.lower() == 'x':
+    if axis.lower() == "x":
         q = Quaternion(axis=[1, 0, 0], angle=angle_rad)
-    elif axis.lower() == 'y':
+    elif axis.lower() == "y":
         q = Quaternion(axis=[0, 1, 0], angle=angle_rad)
-    elif axis.lower() == 'z':
+    elif axis.lower() == "z":
         q = Quaternion(axis=[0, 0, 1], angle=angle_rad)
     else:
         raise ValueError(f"Unsupported axis '{axis}', must be one of x, y, z")
@@ -226,12 +229,36 @@ def bfs_placement(
     floor_margin: float = 0,
     beside_margin: float = 0.1,
     max_attempts: int = 3000,
+    init_rpy: tuple = (1.5708, 0.0, 0.0),
     rotate_objs: bool = True,
     rotate_bg: bool = True,
-    limit_reach_range: bool = True,
+    rotate_context: bool = True,
+    limit_reach_range: tuple[float, float] | None = (0.20, 0.85),
+    max_orient_diff: float | None = 60,
     robot_dim: float = 0.12,
     seed: int = None,
 ) -> LayoutInfo:
+    """Place objects in the layout using BFS traversal.
+
+    Args:
+        layout_file: Path to the JSON file defining the layout structure and assets.
+        floor_margin: Z-offset for the background object, typically for objects placed on the floor.
+        beside_margin: Minimum margin for objects placed 'beside' their parent, used when 'on' placement fails.
+        max_attempts: Maximum number of attempts to find a non-overlapping position for an object.
+        init_rpy: Initial roll-pitch-yaw rotation (radians) applied to all object meshes to align the mesh's
+            coordinate system with the world's (e.g., Z-up).
+        rotate_objs: If True, apply a random rotation around the Z-axis for manipulated and distractor objects.
+        rotate_bg: If True, apply a random rotation around the Y-axis for the background object.
+        rotate_context: If True, apply a random rotation around the Z-axis for the context object.
+        limit_reach_range: If set, enforce a check that manipulated objects are within the robot's reach range, in meters.
+        max_orient_diff: If set, enforce a check that manipulated objects are within the robot's orientation range, in degrees.
+        robot_dim: The approximate dimension (e.g., diameter) of the robot for box representation.
+        seed: Random seed for reproducible placement.
+
+    Returns:
+        A :class:`LayoutInfo` object containing the objects and their final computed 7D poses
+        ([x, y, z, qx, qy, qz, qw]).
+    """
     layout_info = LayoutInfo.from_dict(json.load(open(layout_file, "r")))
     asset_dir = os.path.dirname(layout_file)
     object_mapping = layout_info.objs_mapping
@@ -259,13 +286,23 @@
         mesh_path = os.path.join(asset_dir, mesh_path)
         mesh_info[node]["path"] = mesh_path
         mesh = trimesh.load(mesh_path)
-        vertices = mesh.vertices
-        z1 = np.percentile(vertices[:, 1], 1)
-        z2 = np.percentile(vertices[:, 1], 99)
+        rotation = R.from_euler("xyz", init_rpy, degrees=False)
+        vertices = mesh.vertices @ rotation.as_matrix().T
+        z1 = np.percentile(vertices[:, 2], 1)
+        z2 = np.percentile(vertices[:, 2], 99)
 
         if object_mapping[node] == Scene3DItemEnum.CONTEXT.value:
             object_quat = [0, 0, 0, 1]
+            if rotate_context:
+                angle_rad = np.random.uniform(0, 2 * np.pi)
+                object_quat = compute_axis_rotation_quat(
+                    axis="z", angle_rad=angle_rad
+                )
+                rotation = R.from_quat(object_quat).as_matrix()
+                vertices = vertices @ rotation.T
+
             mesh_info[node]["surface"] = compute_convex_hull_path(vertices)
+
             # Put robot in the CONTEXT edge.
             x, y = random.choice(mesh_info[node]["surface"].vertices)
             theta = np.arctan2(y, x)
@@ -288,9 +325,7 @@
                 axis="z", angle_rad=angle_rad
             )
             rotation = R.from_quat(object_quat).as_matrix()
-            vertices = np.dot(mesh.vertices, rotation.T)
-            z1 = np.percentile(vertices[:, 1], 1)
-            z2 = np.percentile(vertices[:, 1], 99)
+            vertices = vertices @ rotation.T
 
         x1, x2, y1, y2 = compute_xy_bbox(vertices)
         mesh_info[node]["pose"] = [x1, x2, y1, y2, z1, z2, *object_quat]
@@ -343,20 +378,40 @@
                 continue
             # Make sure the manipulated object is reachable by robot.
             if (
-                limit_reach_range
+                limit_reach_range is not None
                 and object_mapping[node]
                 == Scene3DItemEnum.MANIPULATED_OBJS.value
             ):
                 cx = parent_pos[0] + node_box[0] + obj_dx / 2
                 cy = parent_pos[1] + node_box[2] + obj_dy / 2
                 cz = parent_pos[2] + p_z2 - z1
-                robot_pose = position[robot_node][:3]
+                robot_pos = position[robot_node][:3]
                 if not check_reachable(
-                    base_xyz=np.array(robot_pose),
+                    base_xyz=np.array(robot_pos),
                     reach_xyz=np.array([cx, cy, cz]),
+                    min_reach=limit_reach_range[0],
+                    max_reach=limit_reach_range[1],
                 ):
                     continue
 
+            # Make sure the manipulated object is inside the robot's orientation.
+            if (
+                max_orient_diff is not None
+                and object_mapping[node]
+                == Scene3DItemEnum.MANIPULATED_OBJS.value
+            ):
+                cx = parent_pos[0] + node_box[0] + obj_dx / 2
+                cy = parent_pos[1] + node_box[2] + obj_dy / 2
+                cx2, cy2 = position[robot_node][:2]
+                v1 = np.array([-cx2, -cy2])
+                v2 = np.array([cx - cx2, cy - cy2])
+                dot = np.dot(v1, v2)
+                norms = np.linalg.norm(v1) * np.linalg.norm(v2)
+                theta = np.arccos(np.clip(dot / norms, -1.0, 1.0))
+                theta = np.rad2deg(theta)
+                if theta > max_orient_diff:
+                    continue
+
             if not has_iou_conflict(
                 node_box, placed_boxes_map[parent_node]
             ):
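Note: a small numeric check of the new max_orient_diff filter, which measures the angle between the robot-to-origin vector v1 and the robot-to-object vector v2; the positions below are hypothetical:

    import numpy as np

    cx2, cy2 = 0.6, 0.0  # robot base position
    cx, cy = 0.3, 0.2    # candidate object center
    v1 = np.array([-cx2, -cy2])
    v2 = np.array([cx - cx2, cy - cy2])
    cos_t = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    theta = np.rad2deg(np.arccos(np.clip(cos_t, -1.0, 1.0)))
    print(round(theta, 1))  # 33.7 — within the default 60° limit, so the placement is kept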
embodied_gen/validators/quality_checkers.py CHANGED
@@ -513,21 +513,23 @@ class SemanticMatcher(BaseChecker):
     - If there are fewer than <return_num> distinct relevant matches, repeat the closest ones to make a list of <return_num>.
     - Only output the list of <return_num> scene IDs, sorted from most to less similar.
     - Do NOT use markdown, JSON code blocks, or any formatting syntax, only return a plain list like ["id1", ...].
+    - The returned scene ID must exist in the dictionary and be in exactly the same format. For example,
+    if the key in the dictionary is "scene_0040", return "scene_0040"; if it is "scene_040", return "scene_040".
 
     Input example:
     Dictionary:
     "{{
-        "t_scene_008": "A study room with full bookshelves and a lamp in the corner.",
+        "t_scene_0008": "A study room with full bookshelves and a lamp in the corner.",
         "t_scene_019": "A child's bedroom with pink walls and a small desk.",
         "t_scene_020": "A living room with a wooden floor.",
         "t_scene_021": "A living room with toys scattered on the floor.",
         ...
-        "t_scene_office_001": "A very spacious, modern open-plan office with wide desks and no people, panoramic view."
+        "t_scene_office_0001": "A very spacious, modern open-plan office with wide desks and no people, panoramic view."
     }}"
     Text:
     "A traditional indoor room"
     Output:
-    '["t_scene_office_001", ...]'
+    '["t_scene_office_0001", ...]'
 
     Input:
     Dictionary:
@@ -552,9 +554,8 @@
 
 
 def test_semantic_matcher(
-    bg_file: str = "outputs/bg_scenes/bg_scene_list.txt",
+    bg_file: str = "outputs/bg_scenes/scene_list.txt",
 ):
-    bg_file = "outputs/bg_scenes/bg_scene_list.txt"
     scene_dict = {}
     with open(bg_file, "r") as f:
         for line in f:
@@ -575,7 +576,7 @@ def test_semantic_matcher(
     #     "presence_penalty": 0.3,
     # }
     gpt_params = None
-    match_key = SCENE_MATCHER.query(text, str(scene_dict))
+    match_key = SCENE_MATCHER.query(text, str(scene_dict), params=gpt_params)
     print(match_key, ",", scene_dict[match_key])
 
 
embodied_gen/validators/urdf_convertor.py CHANGED
@@ -282,16 +282,12 @@ class URDFGenerator(object):
                 d_params = dict(
                     threshold=0.05, max_convex_hull=100, verbose=False
                 )
-                filename = f"{os.path.splitext(obj_name)[0]}_collision.ply"
+                filename = f"{os.path.splitext(obj_name)[0]}_collision.obj"
                 output_path = os.path.join(mesh_folder, filename)
                 decompose_convex_mesh(
                     mesh_output_path, output_path, **d_params
                 )
-                obj_filename = filename.replace(".ply", ".obj")
-                trimesh.load(output_path).export(
-                    f"{mesh_folder}/{obj_filename}"
-                )
-                collision_mesh = f"{self.output_mesh_dir}/{obj_filename}"
+                collision_mesh = f"{self.output_mesh_dir}/{filename}"
             except Exception as e:
                 logger.warning(
                     f"Convex decomposition failed for {output_path}, {e}."