Delete openscene-v1.1/process_data
- openscene-v1.1/process_data/__init__.py +0 -0
- openscene-v1.1/process_data/create_nuplan_data_with_vis.py +0 -446
- openscene-v1.1/process_data/driving_command.py +0 -178
- openscene-v1.1/process_data/helpers/__init__.py +0 -0
- openscene-v1.1/process_data/helpers/canbus.py +0 -50
- openscene-v1.1/process_data/helpers/multiprocess_helper.py +0 -32
- openscene-v1.1/process_data/helpers/multisweep_helper.py +0 -66
- openscene-v1.1/process_data/helpers/transformation.py +0 -138
- openscene-v1.1/process_data/helpers/viz.py +0 -93
- openscene-v1.1/process_data/helpers/viz_box.py +0 -102
- openscene-v1.1/process_data/helpers/viz_box_2d.py +0 -135
- openscene-v1.1/process_data/process.sh +0 -32
openscene-v1.1/process_data/__init__.py
DELETED
File without changes (empty file)
openscene-v1.1/process_data/create_nuplan_data_with_vis.py
DELETED
@@ -1,446 +0,0 @@

```python
import argparse
import shutil
from typing import Dict, List

# import mmcv
import numpy as np
from os import listdir
from os.path import isfile, join

from pyquaternion import Quaternion

import cv2

from tqdm import tqdm

import os

import multiprocessing
import pickle
from nuplan.common.actor_state.state_representation import StateSE2
from nuplan.common.maps.abstract_map import AbstractMap
from nuplan.common.maps.nuplan_map.map_factory import get_maps_api

from nuplan.database.nuplan_db_orm.nuplandb_wrapper import NuPlanDBWrapper
from nuplan.database.nuplan_db_orm.lidar import Lidar
from nuplan.database.nuplan_db.nuplan_scenario_queries import (
    get_traffic_light_status_for_lidarpc_token_from_db,
    get_images_from_lidar_tokens,
    get_cameras,
)
from nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario import CameraChannel
from navsim.common.extraction.driving_command import get_driving_command

from helpers.multiprocess_helper import get_scenes_per_thread
from helpers.canbus import CanBus
from helpers.multisweep_helper import obtain_sensor2top

NUPLAN_MAPS_ROOT = os.environ["NUPLAN_MAPS_ROOT"]
filtered_classes = ["traffic_cone", "barrier", "czone_sign", "generic_object"]


def create_nuplan_info(nuplan_db_wrapper: NuPlanDBWrapper, db_names: List[str], args):
    nuplan_sensor_root = args.nuplan_sensor_path

    # get all db files & assign db files for current thread.
    scene_dict = {}
    log_sensors = os.listdir(nuplan_sensor_root)

    # For each sequence...
    for log_db_name in db_names:
        log_db = nuplan_db_wrapper.get_log_db(log_db_name)
        log_name = log_db.log_name
        log_token = log_db.log.token
        map_location = log_db.log.location
        vehicle_name = log_db.log.vehicle_name

        # NOTE: I am unsure why "us-nv-las-vegas-strip" is saved as "las_vegas" in db logs.
        map_name = map_location if map_location != "las_vegas" else "us-nv-las-vegas-strip"
        map_api = get_maps_api(NUPLAN_MAPS_ROOT, "nuplan-maps-v1.0", map_name)  # NOTE: lru cached

        log_file = os.path.join(nuplan_db_path, log_db_name + ".db")
        if log_db_name not in log_sensors:
            continue

        frame_idx = 0

        # list (sequence) of point clouds (each frame).
        lidar_pc_list = log_db.lidar_pc
        lidar_pcs = lidar_pc_list

        # get log cam infos
        log_cam_infos = {}
        for cam in get_cameras(log_file, [str(channel.value) for channel in CameraChannel]):
            intrinsics = np.array(pickle.loads(cam.intrinsic), dtype=np.float32)
            translation = np.array(pickle.loads(cam.translation), dtype=np.float32)
            rotation = np.array(pickle.loads(cam.rotation), dtype=np.float32)
            rotation = Quaternion(rotation).rotation_matrix
            distortion = np.array(pickle.loads(cam.distortion), dtype=np.float32)
            c = dict(
                intrinsic=intrinsics,
                distortion=distortion,
                translation=translation,
                rotation=rotation,
            )
            log_cam_infos[cam.token] = c

        # Find the first valid point cloud, with all 8 cameras available.
        for start_idx in range(0, len(lidar_pcs)):
            retrieved_images = get_images_from_lidar_tokens(
                log_file,
                [lidar_pcs[start_idx].token],
                [str(channel.value) for channel in CameraChannel],
            )
            if len(list(retrieved_images)) == 8:
                break

        # Find the true LiDAR start_idx with the minimum timestamp difference to CAM_F0.
        retrieved_images_0 = get_images_from_lidar_tokens(
            log_file, [lidar_pcs[start_idx].token], ["CAM_F0"]
        )
        diff_0 = abs(list(retrieved_images_0)[0].timestamp - lidar_pcs[start_idx].timestamp)
        retrieved_images_1 = get_images_from_lidar_tokens(
            log_file, [lidar_pcs[start_idx + 1].token], ["CAM_F0"]
        )
        diff_1 = abs(list(retrieved_images_1)[0].timestamp - lidar_pcs[start_idx + 1].timestamp)
        start_idx = start_idx if diff_0 < diff_1 else start_idx + 1

        # Find key frames (controlled by args.sample_interval).
        lidar_pc_list = lidar_pc_list[start_idx :: args.sample_interval]
        index = -1
        for lidar_pc in tqdm(lidar_pc_list, dynamic_ncols=True):
            index += 1

            # LiDAR attributes.
            lidar_pc_token = lidar_pc.token
            scene_token = lidar_pc.scene_token
            pc_file_name = lidar_pc.filename
            next_token = lidar_pc.next_token
            prev_token = lidar_pc.prev_token
            lidar_token = lidar_pc.lidar_token
            time_stamp = lidar_pc.timestamp
            scene_name = "log-" + lidar_pc.scene.name
            lidar_boxes = lidar_pc.lidar_boxes
            roadblock_ids = [
                str(roadblock_id)
                for roadblock_id in str(lidar_pc.scene.roadblock_ids).split(" ")
                if len(roadblock_id) > 0
            ]

            # Saving configurations.
            if scene_token not in scene_dict.keys():
                scene_dict[scene_token] = []
                frame_idx = 0
            if frame_idx == 0:
                scene_dict[scene_token] = []

            can_bus = CanBus(lidar_pc).tensor
            lidar = log_db.session.query(Lidar).filter(Lidar.token == lidar_token).all()
            pc_file_path = os.path.join(args.nuplan_sensor_path, pc_file_name)

            if not os.path.exists(pc_file_path):  # some lidar files are missing.
                # print(pc_file_path)
                with open("./nofile.log", "a") as f:
                    f.write(pc_file_path)
                    f.write("\n")
                continue

            traffic_lights = []
            for traffic_light_status in get_traffic_light_status_for_lidarpc_token_from_db(
                log_file, lidar_pc_token
            ):
                lane_connector_id: int = traffic_light_status.lane_connector_id
                is_red: bool = traffic_light_status.status.value == 2
                traffic_lights.append((lane_connector_id, is_red))

            ego_pose = StateSE2(
                lidar_pc.ego_pose.x,
                lidar_pc.ego_pose.y,
                lidar_pc.ego_pose.quaternion.yaw_pitch_roll[0],
            )
            driving_command = get_driving_command(ego_pose, map_api, roadblock_ids)

            info = {
                "token": lidar_pc_token,
                "frame_idx": frame_idx,
                "timestamp": time_stamp,
                "log_name": log_name,
                "log_token": log_token,
                "scene_name": scene_name,
                "scene_token": scene_token,
                "map_location": map_location,
                "roadblock_ids": roadblock_ids,
                "vehicle_name": vehicle_name,
                "can_bus": can_bus,
                "lidar_path": pc_file_name,  # use the relative path.
                "lidar2ego_translation": lidar[0].translation_np,
                "lidar2ego_rotation": [
                    lidar[0].rotation.w,
                    lidar[0].rotation.x,
                    lidar[0].rotation.y,
                    lidar[0].rotation.z,
                ],
                "ego2global_translation": can_bus[:3],
                "ego2global_rotation": can_bus[3:7],
                "ego_dynamic_state": [
                    lidar_pc.ego_pose.vx,
                    lidar_pc.ego_pose.vy,
                    lidar_pc.ego_pose.acceleration_x,
                    lidar_pc.ego_pose.acceleration_y,
                ],
                "traffic_lights": traffic_lights,
                "driving_command": driving_command,
                "cams": dict(),
                "prev_sweep_token": prev_token,
                "next_sweep_token": next_token,
                "sweeps": [],
            }
            info["sample_prev"] = None
            info["sample_next"] = None
            if index > 0:  # find prev.
                info["sample_prev"] = lidar_pc_list[index - 1].token
            if index < len(lidar_pc_list) - 1:  # find next.
                next_key_token = lidar_pc_list[index + 1].token
                next_key_scene = lidar_pc_list[index + 1].scene_token
                info["sample_next"] = next_key_token
            else:
                next_key_token, next_key_scene = None, None

            if next_key_token is None or next_key_token == "":
                frame_idx = 0
            else:
                if next_key_scene != scene_token:
                    frame_idx = 0
                else:
                    frame_idx += 1

            # Parse lidar2ego translation.
            l2e_r = info["lidar2ego_rotation"]
            l2e_t = info["lidar2ego_translation"]
            e2g_r = info["ego2global_rotation"]
            e2g_t = info["ego2global_translation"]
            l2e_r_mat = Quaternion(l2e_r).rotation_matrix
            e2g_r_mat = Quaternion(e2g_r).rotation_matrix

            # add lidar2global: map point coord in lidar to point coord in the global frame.
            l2e = np.eye(4)
            l2e[:3, :3] = l2e_r_mat
            l2e[:3, -1] = l2e_t
            e2g = np.eye(4)
            e2g[:3, :3] = e2g_r_mat
            e2g[:3, -1] = e2g_t
            lidar2global = np.dot(e2g, l2e)
            info["ego2global"] = e2g
            info["lidar2ego"] = l2e
            info["lidar2global"] = lidar2global

            # obtain 8 images' information per frame.
            retrieved_images = get_images_from_lidar_tokens(
                log_file, [lidar_pc.token], [str(channel.value) for channel in CameraChannel]
            )
            cams = {}
            for img in retrieved_images:
                channel = img.channel
                filename = img.filename_jpg
                filepath = os.path.join(args.nuplan_sensor_path, filename)
                if not os.path.exists(filepath):
                    frame_str = f"{log_db_name}, {lidar_pc_token}"
                    tqdm.write(f"camera file missing: {frame_str}")  # was tqdm.tqdm.write, a bug
                    continue
                cam_info = log_cam_infos[img.camera_token]
                cams[channel] = dict(
                    data_path=filename,  # use the relative path.
                    sensor2lidar_rotation=cam_info["rotation"],
                    sensor2lidar_translation=cam_info["translation"],
                    cam_intrinsic=cam_info["intrinsic"],
                    distortion=cam_info["distortion"],
                )
            if len(cams) != 8:
                frame_str = f"{log_db_name}, {lidar_pc_token}"
                tqdm.write(f"not all cameras are available: {frame_str}")
                continue
            info["cams"] = cams

            # parse sweeps if assigned.
            sweeps = []
            tmp_info = info
            count = 0
            while len(sweeps) < args.max_sweeps:
                if tmp_info["prev_sweep_token"] is None:
                    break

                # Get the previous sweep and update info to previous sweep.
                sweep = obtain_sensor2top(
                    tmp_info["prev_sweep_token"], log_db, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, args
                )

                # Save sweeps in every sweep_interval.
                tmp_info = sweep
                if count == args.sweep_interval:
                    if os.path.exists(sweep["data_path"]):
                        sweeps.append(sweep)
                    count = 0
                else:
                    count += 1
            info["sweeps"] = sweeps

            # Parse 3D object labels.
            if not args.is_test:
                if args.filter_instance:
                    fg_lidar_boxes = [
                        box for box in lidar_boxes if box.category.name not in filtered_classes
                    ]
                else:
                    fg_lidar_boxes = lidar_boxes

                instance_tokens = [item.token for item in fg_lidar_boxes]
                track_tokens = [item.track_token for item in fg_lidar_boxes]

                inv_ego_r = lidar_pc.ego_pose.trans_matrix_inv
                ego_yaw = lidar_pc.ego_pose.quaternion.yaw_pitch_roll[0]

                locs = np.array(
                    [
                        np.dot(
                            inv_ego_r[:3, :3],
                            (b.translation_np - lidar_pc.ego_pose.translation_np).T,
                        ).T
                        for b in fg_lidar_boxes
                    ]
                ).reshape(-1, 3)
                dims = np.array([[b.length, b.width, b.height] for b in fg_lidar_boxes]).reshape(
                    -1, 3
                )
                rots = np.array([b.yaw for b in fg_lidar_boxes]).reshape(-1, 1)
                rots = rots - ego_yaw

                velocity = np.array([[b.vx, b.vy] for b in fg_lidar_boxes]).reshape(-1, 2)
                velocity_3d = np.array([[b.vx, b.vy, b.vz] for b in fg_lidar_boxes]).reshape(-1, 3)

                # convert velo from global to lidar: only need the rotation matrix.
                for i in range(len(fg_lidar_boxes)):
                    velo = np.array([*velocity[i], 0.0])
                    velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
                    velocity[i] = velo[:2]

                for i in range(len(fg_lidar_boxes)):
                    velo = velocity_3d[i]
                    velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
                    velocity_3d[i] = velo

                names = [box.category.name for box in fg_lidar_boxes]
                names = np.array(names)
                gt_boxes_nuplan = np.concatenate([locs, dims, rots], axis=1)
                info["anns"] = dict(
                    gt_boxes=gt_boxes_nuplan,
                    gt_names=names,
                    gt_velocity_3d=velocity_3d.reshape(-1, 3),
                    instance_tokens=instance_tokens,
                    track_tokens=track_tokens,
                )
            scene_dict[scene_token].append(info)
        del map_api

        pkl_file_path = f"{args.out_dir}/{log_name}.pkl"
        os.makedirs(args.out_dir, exist_ok=True)
        with open(pkl_file_path, "wb") as f:
            pickle.dump(dict(scene_dict), f, protocol=pickle.HIGHEST_PROTOCOL)


def parse_args():
    parser = argparse.ArgumentParser(description="Prepare nuPlan data")
    parser.add_argument(
        "--thread-num", type=int, default=50, help="number of threads for multi-processing."
    )

    # directory configurations.
    parser.add_argument("--nuplan-root-path", help="the path to nuplan root path.")
    parser.add_argument("--nuplan-db-path", help="the dir saving nuplan db.")
    parser.add_argument("--nuplan-sensor-path", help="the dir to nuplan sensor data.")
    parser.add_argument("--nuplan-map-version", help="nuplan mapping dataset version.")
    parser.add_argument("--nuplan-map-root", help="path to nuplan map data.")
    parser.add_argument("--out-dir", help="output path.")

    # data configurations.
    parser.add_argument("--max-sweeps", type=int, default=10, help="number of point cloud sweeps.")
    parser.add_argument(
        "--sweep-interval", type=int, default=5, help="interval of point cloud sweeps."
    )
    parser.add_argument(
        "--sample-interval", type=int, default=10, help="interval of key frame samples."
    )
    parser.add_argument(
        "--scene-process-type",
        type=str,
        default="skip",
        help="process type when a scene is processed.",
    )

    # TODO.
    parser.add_argument("--save-bev-images", action="store_true", help="XXX")
    parser.add_argument("--save_surround_images", action="store_true", help="XXX")

    # split.
    parser.add_argument("--is-test", action="store_true", help="Dealing with Test set data.")
    parser.add_argument(
        "--filter-instance", action="store_true", help="Ignore instances in filtered_classes."
    )
    parser.add_argument("--split", type=str, default="train", help="Train/Val/Test set.")

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()

    nuplan_root_path = args.nuplan_root_path
    nuplan_db_path = args.nuplan_db_path
    nuplan_sensor_path = args.nuplan_sensor_path
    nuplan_map_version = args.nuplan_map_version
    nuplan_map_root = args.nuplan_map_root
    out_dir = args.out_dir

    nuplan_db_wrapper = NuPlanDBWrapper(
        data_root=nuplan_root_path,
        map_root=nuplan_map_root,
        db_files=nuplan_db_path,
        map_version=nuplan_map_version,
    )

    nuplan_db_path = args.nuplan_db_path
    db_names_with_extension = [
        f for f in listdir(nuplan_db_path) if isfile(join(nuplan_db_path, f))
    ]
    db_names = [name[:-3] for name in db_names_with_extension]
    db_names.sort()

    print(db_names)

    db_names_split = np.split(np.array(db_names), args.thread_num)

    manager = multiprocessing.Manager()
    # return_dict = manager.dict()
    threads = []
    for x in range(args.thread_num):
        t = multiprocessing.Process(
            target=create_nuplan_info,
            name=str(x),
            args=(nuplan_db_wrapper, db_names_split[x], args),
        )
        threads.append(t)
    for thr in threads:
        thr.start()
    for thr in threads:
        if thr.is_alive():
            thr.join()

    # pkl_file_path = f"{args.out_dir}/{args.split}.pkl"
    # os.makedirs(args.out_dir, exist_ok=True)
    # with open(pkl_file_path, "wb") as f:
    #     pickle.dump(dict(return_dict), f, protocol=pickle.HIGHEST_PROTOCOL)
```
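A note on the lidar2global composition in the keyframe loop above: with 4x4 homogeneous transforms, `np.dot(e2g, l2e)` is the usual frame chain. Written out (my notation, not from the file):

$$
T_{\text{lidar}\to\text{global}} = T_{\text{ego}\to\text{global}}\, T_{\text{lidar}\to\text{ego}},
\qquad
T_{a\to b} = \begin{bmatrix} R_{a\to b} & t_{a\to b} \\ \mathbf{0}^{\top} & 1 \end{bmatrix},
$$

so a lidar-frame point maps to the global frame as $p_g = R_{e2g}(R_{l2e}\, p_l + t_{l2e}) + t_{e2g}$.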
openscene-v1.1/process_data/driving_command.py
DELETED
@@ -1,178 +0,0 @@

```python
from typing import Dict, List, Tuple
import warnings

import numpy as np
import numpy.typing as npt

from shapely.geometry import Point

from nuplan.common.maps.abstract_map import AbstractMap
from nuplan.common.actor_state.state_representation import StateSE2
from nuplan.common.maps.maps_datatypes import SemanticMapLayer
from nuplan.common.maps.abstract_map_objects import (
    LaneGraphEdgeMapObject,
    RoadBlockGraphEdgeMapObject,
)

from navsim.planning.simulation.planner.pdm_planner.utils.route_utils import (
    route_roadblock_correction,
)
from navsim.planning.simulation.planner.pdm_planner.utils.graph_search.dijkstra import (
    Dijkstra,
)
from navsim.planning.simulation.planner.pdm_planner.utils.pdm_path import PDMPath
from navsim.planning.simulation.planner.pdm_planner.utils.pdm_geometry_utils import (
    convert_absolute_to_relative_se2_array,
)
from navsim.planning.simulation.planner.pdm_planner.utils.pdm_enums import (
    SE2Index,
)

# shapely runtime warning
warnings.filterwarnings("ignore", category=RuntimeWarning)


def get_driving_command(
    ego_pose: StateSE2,
    map_api: AbstractMap,
    route_roadblock_ids: List[str],
    distance: float = 20,
    lateral_offset: float = 2,
) -> npt.NDArray[np.int_]:
    """
    Creates the one-hot (left, forward, right) driving command for the ego vehicle.
    :param ego_pose: (x, y, heading) object for global ego pose
    :param map_api: nuPlan's map interface
    :param route_roadblock_ids: roadblock ids of route
    :param distance: longitudinal distance to interpolate on the centerline, defaults to 20
    :param lateral_offset: lateral offset for left/right threshold, defaults to 2
    :return: numpy one-hot array of driving_command
    """

    driving_command = np.zeros((3,), dtype=int)  # one-hot: (left, forward, right)

    # If no route available, go straight
    if len(route_roadblock_ids) == 0:
        driving_command[1] = 1
        return driving_command

    # Apply route correction on route_roadblock_ids
    route_roadblock_dict, _ = get_route_dicts(map_api, route_roadblock_ids)
    corrected_route_roadblock_ids = route_roadblock_correction(
        ego_pose, map_api, route_roadblock_dict
    )
    route_roadblock_dict, route_lane_dict = get_route_dicts(map_api, corrected_route_roadblock_ids)

    # Find the nearest lane, graph search, and centerline extraction
    current_lane = get_current_lane(ego_pose, route_lane_dict)
    discrete_centerline = get_discrete_centerline(
        current_lane, route_roadblock_dict, route_lane_dict
    )
    centerline = PDMPath(discrete_centerline)

    # Interpolate target distance on centerline
    current_progress = centerline.project(Point(*ego_pose.array))
    target_progress = current_progress + distance

    current_pose_array, target_pose_array = centerline.interpolate(
        [current_progress, target_progress], as_array=True
    )
    target_pose_array = convert_absolute_to_relative_se2_array(
        StateSE2(*current_pose_array), target_pose_array[None, ...]
    )[0]

    # Threshold for driving command
    if target_pose_array[SE2Index.Y] >= lateral_offset:
        driving_command[0] = 1
    elif target_pose_array[SE2Index.Y] <= -lateral_offset:
        driving_command[2] = 1
    else:
        driving_command[1] = 1

    # delete some variables for memory management
    del route_roadblock_dict, route_lane_dict, _, centerline
    return driving_command


def get_route_dicts(
    map_api: AbstractMap, route_roadblock_ids: List[str]
) -> Tuple[Dict[str, RoadBlockGraphEdgeMapObject], Dict[str, LaneGraphEdgeMapObject]]:
    """
    Loads the roadblock and lane dicts.
    :param map_api: nuPlan's map interface
    :param route_roadblock_ids: roadblock ids of route
    :return: tuple of roadblock and lane dicts
    """

    # remove repeated ids while retaining order in list
    route_roadblock_ids = list(dict.fromkeys(route_roadblock_ids))

    route_roadblock_dict: Dict[str, RoadBlockGraphEdgeMapObject] = {}
    route_lane_dict: Dict[str, LaneGraphEdgeMapObject] = {}

    for id_ in route_roadblock_ids:
        block = map_api.get_map_object(id_, SemanticMapLayer.ROADBLOCK)
        block = block or map_api.get_map_object(id_, SemanticMapLayer.ROADBLOCK_CONNECTOR)
        route_roadblock_dict[block.id] = block
        for lane in block.interior_edges:
            route_lane_dict[lane.id] = lane

    return route_roadblock_dict, route_lane_dict


def get_current_lane(
    ego_pose: StateSE2, route_lane_dict: Dict[str, LaneGraphEdgeMapObject]
) -> LaneGraphEdgeMapObject:
    """
    Find current lane, either by intersection with the ego pose, or by distance.
    :param ego_pose: (x, y, heading) object for global ego pose
    :param route_lane_dict: Dictionary of lane ids and objects
    :return: Lane object
    """

    closest_distance = np.inf
    starting_lane = None
    for edge in route_lane_dict.values():
        if edge.contains_point(ego_pose):
            starting_lane = edge
            break

        distance = edge.polygon.distance(Point(*ego_pose.array))
        if distance < closest_distance:
            starting_lane = edge
            closest_distance = distance

    return starting_lane


def get_discrete_centerline(
    current_lane: LaneGraphEdgeMapObject,
    route_roadblock_dict: Dict[str, RoadBlockGraphEdgeMapObject],
    route_lane_dict: Dict[str, LaneGraphEdgeMapObject],
    search_depth: int = 30,
) -> List[StateSE2]:
    """
    Given the current lane, apply graph search, and extract centerline.
    :param current_lane: Lane object closest to ego
    :param route_roadblock_dict: Dictionary of roadblock ids and objects
    :param route_lane_dict: Dictionary of lane ids and objects
    :param search_depth: max search depth of Dijkstra, defaults to 30
    :return: List of (x, y, heading) objects
    """

    roadblocks = list(route_roadblock_dict.values())
    roadblock_ids = list(route_roadblock_dict.keys())

    # find current roadblock index
    start_idx = np.argmax(np.array(roadblock_ids) == current_lane.get_roadblock_id())
    roadblock_window = roadblocks[start_idx : start_idx + search_depth]

    graph_search = Dijkstra(current_lane, list(route_lane_dict.keys()))
    route_plan, _ = graph_search.search(roadblock_window[-1])

    centerline_discrete_path: List[StateSE2] = []
    for lane in route_plan:
        centerline_discrete_path.extend(lane.baseline_path.discrete_path)

    return centerline_discrete_path
```
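To make the left/forward/right threshold at the end of `get_driving_command` concrete, here is a minimal standalone sketch (illustrative only, not part of the deleted file) of the same decision, assuming the target pose has already been expressed relative to the current centerline pose:

```python
import numpy as np

def command_from_lateral(y: float, lateral_offset: float = 2.0) -> np.ndarray:
    """One-hot (left, forward, right) command from the target's lateral offset y."""
    command = np.zeros(3, dtype=int)
    if y >= lateral_offset:       # target at least 2 m to the left
        command[0] = 1
    elif y <= -lateral_offset:    # target at least 2 m to the right
        command[2] = 1
    else:                         # otherwise keep going forward
        command[1] = 1
    return command

assert command_from_lateral(3.5).tolist() == [1, 0, 0]   # left
assert command_from_lateral(-0.4).tolist() == [0, 1, 0]  # forward
```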
openscene-v1.1/process_data/helpers/__init__.py
DELETED
File without changes (empty file)
openscene-v1.1/process_data/helpers/canbus.py
DELETED
@@ -1,50 +0,0 @@

```python
import numpy as np


class CanBus:
    """Wrapper class to convert lidar_can_bus to numpy array"""

    def __init__(self, lidar_pc):
        self.x = lidar_pc.ego_pose.x
        self.y = lidar_pc.ego_pose.y
        self.z = lidar_pc.ego_pose.z

        self.qw = lidar_pc.ego_pose.qw
        self.qx = lidar_pc.ego_pose.qx
        self.qy = lidar_pc.ego_pose.qy
        self.qz = lidar_pc.ego_pose.qz

        self.acceleration_x = lidar_pc.ego_pose.acceleration_x
        self.acceleration_y = lidar_pc.ego_pose.acceleration_y
        self.acceleration_z = lidar_pc.ego_pose.acceleration_z

        self.vx = lidar_pc.ego_pose.vx
        self.vy = lidar_pc.ego_pose.vy
        self.vz = lidar_pc.ego_pose.vz

        self.angular_rate_x = lidar_pc.ego_pose.angular_rate_x
        self.angular_rate_y = lidar_pc.ego_pose.angular_rate_y
        self.angular_rate_z = lidar_pc.ego_pose.angular_rate_z

        self.tensor = np.array(
            [
                self.x,
                self.y,
                self.z,
                self.qw,
                self.qx,
                self.qy,
                self.qz,
                self.acceleration_x,
                self.acceleration_y,
                self.acceleration_z,
                self.vx,
                self.vy,
                self.vz,
                self.angular_rate_x,
                self.angular_rate_y,
                self.angular_rate_z,
                0.0,
                0.0,
            ]
        )
```
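For reference, the 18-element tensor built above is consumed by slicing in create_nuplan_data_with_vis.py; a short sketch of the layout, with index ranges read off the constructor order (the two trailing zeros are padding):

```python
can_bus = CanBus(lidar_pc).tensor        # shape (18,)
ego2global_translation = can_bus[:3]     # x, y, z
ego2global_rotation = can_bus[3:7]       # qw, qx, qy, qz
# can_bus[7:10]  acceleration (x, y, z)
# can_bus[10:13] velocity (x, y, z)
# can_bus[13:16] angular rate (x, y, z)
# can_bus[16:18] padding zeros
```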
openscene-v1.1/process_data/helpers/multiprocess_helper.py
DELETED
@@ -1,32 +0,0 @@

```python
import numpy as np
import multiprocessing


def get_scenes_per_thread(scenes, thread_num):
    scenes = sorted(scenes)
    num_tasks = thread_num
    cur_id = int(multiprocessing.current_process().name)

    num_scene = len(scenes)
    a = num_scene // num_tasks
    b = num_scene % num_tasks

    if cur_id == 0:
        print("num_scene:", num_scene)

    # The first b tasks receive a + 1 scenes each; the rest receive a scenes.
    process_num = []
    for task_id in range(num_tasks):
        if task_id >= b:
            process_num.append(a)
        else:
            process_num.append(a + 1)
    addsum = np.cumsum(process_num)

    if cur_id == 0:
        start = 0
        end = addsum[0]
    else:
        start = addsum[cur_id - 1]
        end = addsum[cur_id]

    return scenes[start:end], start
```
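A worked example of the partition logic, with illustrative numbers (not from the file): for 10 scenes across 3 tasks, the remainder is spread over the first tasks, and the cumulative sum gives each task's slice boundaries.

```python
import numpy as np

num_scene, num_tasks = 10, 3
a, b = num_scene // num_tasks, num_scene % num_tasks              # a=3, b=1
process_num = [a + 1 if t < b else a for t in range(num_tasks)]   # [4, 3, 3]
addsum = np.cumsum(process_num)                                   # [4, 7, 10]
# task 0 -> scenes[0:4], task 1 -> scenes[4:7], task 2 -> scenes[7:10]
```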
openscene-v1.1/process_data/helpers/multisweep_helper.py
DELETED
@@ -1,66 +0,0 @@

```python
import os
import numpy as np
from pyquaternion import Quaternion

from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
from nuplan.database.nuplan_db_orm.lidar import Lidar

from navsim.common.extraction.helpers.canbus import CanBus


def obtain_sensor2top(lidar_token, log_db, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, args):
    """Obtain the info with RT matrix from other sensors to Top LiDAR.

    Args:
        lidar_token (str): Sample data token corresponding to the
            specific sensor type.
        log_db: To obtain LiDAR of corresponding token.
        l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3).
        l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego
            in shape (3, 3).
        e2g_t (np.ndarray): Translation from ego to global in shape (1, 3).
        e2g_r_mat (np.ndarray): Rotation matrix from ego to global
            in shape (3, 3).

    Returns:
        sweep (dict): Sweep information after transformation.
    """
    lidar_pc = log_db.session.query(LidarPc).filter(LidarPc.token == lidar_token).all()
    lidar_pc = lidar_pc[0]
    can_bus = CanBus(lidar_pc).tensor

    lidar_sensor = log_db.session.query(Lidar).filter(Lidar.token == lidar_pc.lidar_token).all()
    lidar_sensor = lidar_sensor[0]

    sweep = {
        "prev_sweep_token": lidar_pc.prev_token,
        "data_path": os.path.join(args.nuplan_sensor_path, lidar_pc.filename),
        "type": lidar_sensor.channel,
        "sample_data_token": lidar_pc.token,
        "sensor2ego_translation": lidar_sensor.translation_np,
        "sensor2ego_rotation": lidar_sensor.quaternion,
        "ego2global_translation": can_bus[:3],
        "ego2global_rotation": can_bus[3:7],
        "timestamp": lidar_pc.timestamp,
    }

    l2e_r_s = sweep["sensor2ego_rotation"]
    l2e_t_s = sweep["sensor2ego_translation"]
    e2g_r_s = sweep["ego2global_rotation"]
    e2g_t_s = sweep["ego2global_translation"]

    # obtain the RT from sensor to Top LiDAR
    # sweep->ego->global->ego'->lidar
    l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
    e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
    R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
    T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
    )
    T -= (
        e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
        + l2e_t @ np.linalg.inv(l2e_r_mat).T
    )
    sweep["sensor2lidar_rotation"] = R.T  # points @ R.T + T
    sweep["sensor2lidar_translation"] = T
    return sweep
```
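The R/T algebra above composes the sweep's sensor-to-global chain with the inverse of the keyframe's global-to-top-lidar chain, as the code comment sweep->ego->global->ego'->lidar indicates. In column-vector form, with superscript $s$ marking the sweep frame (my notation, not from the file):

$$
p_{\text{top}} = R_{l2e}^{-1}\Bigl(R_{e2g}^{-1}\bigl(R^{s}_{e2g}\,(R^{s}_{l2e}\, p_{\text{sweep}} + t^{s}_{l2e}) + t^{s}_{e2g} - t_{e2g}\bigr) - t_{l2e}\Bigr),
$$

which the code evaluates in row-vector form, so that sweep points map to the top LiDAR frame via `points @ R.T + T`.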
openscene-v1.1/process_data/helpers/transformation.py
DELETED
@@ -1,138 +0,0 @@

```python
import numpy as np
from pyquaternion import Quaternion

from nuplan.database.utils.pointclouds.pointcloud import PointCloud


def _load_points(pc_file_name):
    pc = PointCloud.parse_from_file(pc_file_name).to_pcd_bin2().T
    return pc


def transform_pcs_to_images(
    pc,
    cam2lidar_rotation,
    cam2lidar_translation,
    cam_intrinsic,
    img_shape=None,
    eps=1e-3,
    return_depth=False,
):
    """Project point clouds from LiDAR coordinates onto a camera's image plane.

    Args:
        pc: a numpy array with shape [-1, 6]
        cam2lidar_rotation / cam2lidar_translation: camera-to-LiDAR extrinsics.
        cam_intrinsic: the 3x3 camera intrinsic matrix.
    Return:
        cur_pc_cam: 2d coordinates in the corresponding camera space, plus a
            boolean mask of the points inside the field of view.
    """
    pc_xyz = pc[:, :3]

    lidar2cam_r = np.linalg.inv(cam2lidar_rotation)
    lidar2cam_t = cam2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t

    viewpad = np.eye(4)
    viewpad[: cam_intrinsic.shape[0], : cam_intrinsic.shape[1]] = cam_intrinsic
    lidar2img_rt = viewpad @ lidar2cam_rt.T

    cur_pc_xyz = np.concatenate([pc_xyz, np.ones_like(pc_xyz)[:, :1]], -1)
    cur_pc_cam = lidar2img_rt @ cur_pc_xyz.T
    cur_pc_cam = cur_pc_cam.T

    cur_pc_in_fov = cur_pc_cam[:, 2] > eps
    depth = cur_pc_cam[..., 2:3]

    cur_pc_cam = cur_pc_cam[..., 0:2] / np.maximum(
        cur_pc_cam[..., 2:3], np.ones_like(cur_pc_cam[..., 2:3]) * eps
    )
    if img_shape is not None:
        img_h, img_w = img_shape
        cur_pc_in_fov = (
            cur_pc_in_fov
            & (cur_pc_cam[:, 0] < (img_w - 1))
            & (cur_pc_cam[:, 0] > 0)
            & (cur_pc_cam[:, 1] < (img_h - 1))
            & (cur_pc_cam[:, 1] > 0)
        )
    if return_depth:
        cur_pc_cam = np.concatenate([cur_pc_cam, depth], axis=-1)
    return cur_pc_cam, cur_pc_in_fov


def transform_cam_to_img(pc_cam, cam_intrinsic, img_shape=None, eps=1e-3, return_depth=False):
    """Project points from camera coordinates onto the image plane.

    Args:
        pc_cam: a numpy array with shape [-1, 3] in camera coordinates.
        cam_intrinsic: the 3x3 camera intrinsic matrix.
    Return:
        pc_img: 2d image coordinates, plus a boolean in-FOV mask.
    """
    pc_cam = pc_cam[:, :3]

    viewpad = np.eye(4)
    viewpad[: cam_intrinsic.shape[0], : cam_intrinsic.shape[1]] = cam_intrinsic

    pc_img = np.concatenate([pc_cam, np.ones_like(pc_cam)[:, :1]], -1)
    pc_img = viewpad @ pc_img.T
    pc_img = pc_img.T

    cur_pc_in_fov = pc_img[:, 2] > eps
    depth = pc_img[..., 2:3]

    pc_img = pc_img[..., 0:2] / np.maximum(pc_img[..., 2:3], np.ones_like(pc_img[..., 2:3]) * eps)
    if img_shape is not None:
        img_h, img_w = img_shape
        cur_pc_in_fov = (
            cur_pc_in_fov
            & (pc_img[:, 0] < (img_w - 1))
            & (pc_img[:, 0] > 0)
            & (pc_img[:, 1] < (img_h - 1))
            & (pc_img[:, 1] > 0)
        )
    if return_depth:
        pc_img = np.concatenate([pc_img, depth], axis=-1)
    return pc_img, cur_pc_in_fov


def transform_nuplan_boxes_to_cam(box, cam2lidar_rotation, cam2lidar_translation):
    """Transform 3D boxes from nuPlan LiDAR coordinates to camera coordinates.

    Args:
        box: a numpy array with shape [-1, 7]
    """
    locs, dims, rots = box[:, :3], box[:, 3:6], box[:, 6:]
    dims_cams = dims[:, [0, 2, 1]]  # l, w, h -> l, h, w

    rots_cam = np.zeros_like(rots)
    for idx, rot in enumerate(rots):
        rot = Quaternion(axis=[0, 0, 1], radians=rot)
        rot = Quaternion(matrix=cam2lidar_rotation).inverse * rot
        rots_cam[idx] = -rot.yaw_pitch_roll[0]

    lidar2cam_r = np.linalg.inv(cam2lidar_rotation)
    lidar2cam_t = cam2lidar_translation @ lidar2cam_r.T
    lidar2cam_rt = np.eye(4)
    lidar2cam_rt[:3, :3] = lidar2cam_r.T
    lidar2cam_rt[3, :3] = -lidar2cam_t

    locs_cam = np.concatenate([locs, np.ones_like(locs)[:, :1]], -1)  # -1, 4
    locs_cam = lidar2cam_rt.T @ locs_cam.T
    locs_cam = locs_cam.T
    locs_cam = locs_cam[:, :-1]
    return np.concatenate([locs_cam, dims_cams, rots_cam], -1)


def transform_sweep_pc_to_lidar_top(sweep_pc, sensor2lidar_rotation, sensor2lidar_translation):
    sweep_xyz = sweep_pc[:, :3]
    sweep_xyz = np.concatenate([sweep_xyz, np.ones_like(sweep_xyz)[:, :1]], -1)

    sensor2lidar_rt = np.eye(4)
    sensor2lidar_rt[:3, :3] = sensor2lidar_rotation.T
    sensor2lidar_rt[3, :3] = sensor2lidar_translation

    sweep_xyz = sweep_xyz @ sensor2lidar_rt
    return sweep_xyz[:, :3]
```
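The two projection helpers above implement the standard pinhole chain. As a compact reference (my notation): with the intrinsics $K$ zero-padded into the 4x4 `viewpad`, a homogeneous LiDAR point $\tilde{p}$ is taken to pixel coordinates via

$$
\tilde{u} = K\, T_{\text{lidar}\to\text{cam}}\, \tilde{p}, \qquad
(u, v) = \left(\frac{\tilde{u}_x}{\max(\tilde{u}_z, \varepsilon)},\ \frac{\tilde{u}_y}{\max(\tilde{u}_z, \varepsilon)}\right),
$$

and a point counts as in-FOV only when $\tilde{u}_z > \varepsilon$ and $(u, v)$ falls strictly inside the image bounds.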
openscene-v1.1/process_data/helpers/viz.py
DELETED
@@ -1,93 +0,0 @@

```python
import cv2
import numpy as np

from navsim.common.extraction.helpers import transformation


def draw_pcs_on_images(pc, cam_infos, eps=1e-3):
    for cam_type, cam_info in cam_infos.items():
        cur_img_path = cam_info["data_path"]
        cur_img = cv2.imread(cur_img_path)
        cur_img_h, cur_img_w = cur_img.shape[:2]

        cur_pc_cam, cur_pc_in_fov = transformation.transform_pcs_to_images(
            pc,
            cam_info["sensor2lidar_rotation"],
            cam_info["sensor2lidar_translation"],
            cam_info["cam_intrinsic"],
            img_shape=(cur_img_h, cur_img_w),
            eps=eps,
        )

        cur_pc_cam = cur_pc_cam[cur_pc_in_fov]
        for x, y in cur_pc_cam:
            cv2.circle(cur_img, (int(x), int(y)), 3, (255, 0, 0), 3)
        cv2.imwrite(f"dbg/{cam_type}.png", cur_img)
    return None


def draw_pcs(points, labels, color_map):
    """Draw point cloud from BEV

    Args:
        points: A ndarray with shape as [-1, 3]
        labels: the label of each point with shape [-1]
        color_map: color of each label.
    """
    import matplotlib.pyplot as plt

    _, ax = plt.subplots(1, 1, figsize=(9, 9))
    axes_limit = 40
    # points: LiDAR points with shape [-1, 3]
    viz_points = points
    dists = np.sqrt(np.sum(viz_points[:, :2] ** 2, axis=1))
    colors = np.minimum(1, dists / axes_limit / np.sqrt(2))

    # prepare color_map
    points_color = color_map[labels] / 255.0  # -1, 3

    point_scale = 0.2
    scatter = ax.scatter(viz_points[:, 0], viz_points[:, 1], c=points_color, s=point_scale)

    ax.plot(0, 0, "x", color="red")
    ax.set_xlim(-axes_limit, axes_limit)
    ax.set_ylim(-axes_limit, axes_limit)
    ax.axis("off")
    ax.set_aspect("equal")
    plt.savefig("dbg/dbg.png", bbox_inches="tight", pad_inches=0, dpi=200)


def draw_sweep_pcs(info, sweeps):
    cur_pc_file = info["lidar_path"]
    cur_pc = transformation._load_points(cur_pc_file)
    viz_pcs = [cur_pc[:, :3]]
    viz_labels = [np.ones_like(cur_pc)[:, 0] * 0.0]

    for idx, sweep in enumerate(sweeps):
        sweep_pc_file = sweep["data_path"]
        sweep_pc = transformation._load_points(sweep_pc_file)
        sweep_pc = transformation.transform_sweep_pc_to_lidar_top(
            sweep_pc, sweep["sensor2lidar_rotation"], sweep["sensor2lidar_translation"]
        )

        viz_pcs.append(sweep_pc)
        viz_labels.append(np.ones_like(sweep_pc)[:, 0] * (idx + 1))

    viz_pcs = np.concatenate(viz_pcs, 0)
    viz_labels = np.concatenate(viz_labels, 0).astype(int)  # np.int is deprecated
    color_map = np.array(
        [
            [245, 150, 100],
            [245, 230, 100],
            [250, 80, 100],
            [150, 60, 30],
            [255, 0, 0],
            [180, 30, 80],
            [255, 0, 0],
            [30, 30, 255],
            [200, 40, 255],
            [90, 30, 150],
        ]
    )
    draw_pcs(viz_pcs, viz_labels, color_map)
    return None
```
openscene-v1.1/process_data/helpers/viz_box.py
DELETED
@@ -1,102 +0,0 @@

```python
import numpy as np

from matplotlib.lines import Line2D


def rotz(t):
    """Rotation about the z-axis."""
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])


def draw_box_and_pc(points, boxes, names=None):
    import matplotlib.pyplot as plt

    _, ax = plt.subplots(1, 1, figsize=(9, 9))
    axes_limit = 40
    # points: LiDAR points with shape [-1, 3]
    viz_points = points
    dists = np.sqrt(np.sum(viz_points[:, :2] ** 2, axis=1))
    colors = np.minimum(1, dists / axes_limit / np.sqrt(2))

    # prepare color_map
    points_color = np.array([[245, 150, 100]]).reshape(1, 3).repeat(points.shape[0], 0)
    points_color = points_color / 255.0
    point_scale = 0.2
    scatter = ax.scatter(viz_points[:, 0], viz_points[:, 1], c=points_color, s=point_scale)

    ax.plot(0, 0, "x", color="red")
    ax.set_xlim(-axes_limit, axes_limit)
    ax.set_ylim(-axes_limit, axes_limit)

    if names is None:
        draw_box_3d(boxes, ax)
    else:
        color_map = {
            "vehicle": "green",
            "bicycle": "blue",
            "pedestrian": "red",
            "traffic_cone": "darkred",
            "barrier": "peru",
            "czone_sign": "pink",
            "generic_object": "black",
        }
        colors = [color_map[name] for name in names]
        draw_box_3d(boxes, ax, linestyle="--", colors=colors)

    ax.axis("off")
    ax.set_aspect("equal")
    plt.savefig("dbg/dbg.png", bbox_inches="tight", pad_inches=0, dpi=200)


def draw_box_3d(boxes, ax, linestyle="--", colors=None):
    for o in range(len(boxes)):
        # Fall back to cyan when no per-box colors are given (the original
        # default colors="cyan" would index the string character by character).
        color = colors[o] if colors is not None else "cyan"
        obj = boxes[o]
        c_x, c_y, c_z, l, w, h, rz = obj[0], obj[1], obj[2], obj[3], obj[4], obj[5], obj[6]

        R = rotz(rz)

        # 3d bounding box corners
        x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
        y_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
        z_corners = [h, h, h, h, 0, 0, 0, 0]

        # rotate and translate 3d bounding box
        corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))

        corners_3d[0, :] = corners_3d[0, :] + c_x
        corners_3d[1, :] = corners_3d[1, :] + c_y
        corners_3d[2, :] = corners_3d[2, :] + c_z

        corners_3d_velo = np.transpose(corners_3d)

        x1, x2, x3, x4 = corners_3d_velo[0:4, 0]
        y1, y2, y3, y4 = corners_3d_velo[0:4, 1]

        polygon = np.zeros([5, 2], dtype=np.float32)
        polygon[0, 0] = x1
        polygon[1, 0] = x2
        polygon[2, 0] = x3
        polygon[3, 0] = x4
        polygon[4, 0] = x1

        polygon[0, 1] = y1
        polygon[1, 1] = y2
        polygon[2, 1] = y3
        polygon[3, 1] = y4
        polygon[4, 1] = y1

        line1 = [(x1, y1), (x2, y2)]
        line2 = [(x2, y2), (x3, y3)]
        line3 = [(x3, y3), (x4, y4)]
        line4 = [(x4, y4), (x1, y1)]
        (line1_xs, line1_ys) = zip(*line1)
        (line2_xs, line2_ys) = zip(*line2)
        (line3_xs, line3_ys) = zip(*line3)
        (line4_xs, line4_ys) = zip(*line4)
        ax.add_line(Line2D(line1_xs, line1_ys, linewidth=1, linestyle=linestyle, color=color))
        ax.add_line(Line2D(line2_xs, line2_ys, linewidth=1, linestyle=linestyle, color=color))
        ax.add_line(Line2D(line3_xs, line3_ys, linewidth=1, linestyle=linestyle, color=color))
        ax.add_line(Line2D(line4_xs, line4_ys, linewidth=1, linestyle=linestyle, color=color))
```
openscene-v1.1/process_data/helpers/viz_box_2d.py
DELETED
@@ -1,135 +0,0 @@

```python
"""
Visualize 3D boxes in Image space.
Align the setting in mmdetection3d:
* Convert 3D box in nuplan coordinates to camera coordinates.
* Draw 3D box in camera.
"""

import cv2
import numpy as np
from navsim.common.extraction.helpers import transformation


def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate points by angles according to axis.

    Args:
        points (torch.Tensor): Points of shape (N, M, 3).
        angles (torch.Tensor): Vector of angles in shape (N,)
        axis (int, optional): The axis to be rotated. Defaults to 0.

    Raises:
        ValueError: when the axis is not in range [0, 1, 2], it will \
            raise value error.

    Returns:
        torch.Tensor: Rotated points in shape (N, M, 3)
    """
    rot_sin = np.sin(angles)
    rot_cos = np.cos(angles)
    ones = np.ones_like(rot_cos)
    zeros = np.zeros_like(rot_cos)
    if axis == 1:
        rot_mat_T = np.stack(
            [
                np.stack([rot_cos, zeros, -rot_sin]),
                np.stack([zeros, ones, zeros]),
                np.stack([rot_sin, zeros, rot_cos]),
            ]
        )
    elif axis == 2 or axis == -1:
        rot_mat_T = np.stack(
            [
                np.stack([rot_cos, -rot_sin, zeros]),
                np.stack([rot_sin, rot_cos, zeros]),
                np.stack([zeros, zeros, ones]),
            ]
        )
    elif axis == 0:
        rot_mat_T = np.stack(
            [
                np.stack([zeros, rot_cos, -rot_sin]),
                np.stack([zeros, rot_sin, rot_cos]),
                np.stack([ones, zeros, zeros]),
            ]
        )
    else:
        raise ValueError(f"axis should in range [0, 1, 2], got {axis}")
    return np.einsum("aij,jka->aik", points, rot_mat_T)


def plot_rect3d_on_img(img, num_rects, rect_corners, color=(0, 255, 0), thickness=1):
    """Plot the boundary lines of 3D rectangles on 2D images.

    Args:
        img (numpy.array): The numpy array of image.
        num_rects (int): Number of 3D rectangles.
        rect_corners (numpy.array): Coordinates of the corners of 3D
            rectangles. Should be in the shape of [num_rect, 8, 2].
        color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0).
        thickness (int, optional): The thickness of bboxes. Default: 1.
    """
    line_indices = (
        (0, 1),
        (0, 3),
        (0, 4),
        (1, 2),
        (1, 5),
        (3, 2),
        (3, 7),
        (4, 5),
        (4, 7),
        (2, 6),
        (5, 6),
        (6, 7),
    )
    for i in range(num_rects):
        corners = rect_corners[i].astype(int)  # np.int is deprecated
        for start, end in line_indices:
            cv2.line(
                img,
                (corners[start, 0], corners[start, 1]),
                (corners[end, 0], corners[end, 1]),
                color,
                thickness,
                cv2.LINE_AA,
            )
    return img.astype(np.uint8)


def draw_boxes_nuplan_on_img(gt_boxes_nuplan, cam_infos, eps=1e-3):
    for cam_type, cam_info in cam_infos.items():
        cur_img_path = cam_info["data_path"]
        cur_img = cv2.imread(cur_img_path)
        cur_img_h, cur_img_w = cur_img.shape[:2]

        gt_boxes_cams = transformation.transform_nuplan_boxes_to_cam(
            gt_boxes_nuplan,
            cam_info["sensor2lidar_rotation"],
            cam_info["sensor2lidar_translation"],
        )

        # Then convert gt_boxes_cams to corners.
        cur_locs, cur_dims, cur_rots = (
            gt_boxes_cams[:, :3],
            gt_boxes_cams[:, 3:6],
            gt_boxes_cams[:, 6:],
        )
        corners_norm = np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)
        corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
        corners_norm = corners_norm - np.array([0.5, 0.5, 0.5])
        corners = cur_dims.reshape([-1, 1, 3]) * corners_norm.reshape([1, 8, 3])
        corners = rotation_3d_in_axis(corners, cur_rots.squeeze(-1), axis=1)
        corners += cur_locs.reshape(-1, 1, 3)

        # Then project corners to the image.
        corners_img, corners_pc_in_fov = transformation.transform_cam_to_img(
            corners.reshape(-1, 3), cam_info["cam_intrinsic"], img_shape=(cur_img_h, cur_img_w)
        )
        corners_img = corners_img.reshape(-1, 8, 2)
        corners_pc_in_fov = corners_pc_in_fov.reshape(-1, 8)
        valid_corners = corners_pc_in_fov.all(-1)
        corners_img = corners_img[valid_corners]
        cur_img = plot_rect3d_on_img(cur_img, len(corners_img), corners_img)
        cv2.imwrite(f"dbg/{cam_type}.png", cur_img)
    return None
```
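The corner construction in `draw_boxes_nuplan_on_img` uses an `unravel_index` trick to enumerate box corners; a minimal standalone sketch (illustrative only) of what it produces:

```python
import numpy as np

# Enumerate the 8 corners of a unit cube as binary (x, y, z) triples.
corners_norm = np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)
# [[0,0,0],[0,0,1],[0,1,0],[0,1,1],[1,0,0],[1,0,1],[1,1,0],[1,1,1]]

# Re-order so consecutive corners trace box edges, then center on the origin.
corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] - np.array([0.5, 0.5, 0.5])

# Scaling by a box's (l, h, w) dims then yields that box's 8 corner offsets.
print(corners_norm.shape)  # (8, 3)
```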
openscene-v1.1/process_data/process.sh
DELETED
@@ -1,32 +0,0 @@

```bash
# Scripts for processing nuplan data.
# export PYTHONPATH=/cpfs01/user/yangzetong/code/workshop_codes/nuplan-devkit:${PYTHONPATH}


# export NUPLAN_DATA_ROOT="$HOME/nuplan/dataset/"
# export NUPLAN_MAPS_ROOT="$HOME/nuplan/dataset/maps"
# export NUPLAN_EXP_ROOT="$HOME/nuplan/exp"
# export NUPLAN_DEVKIT_ROOT="$HOME/nuplan-devkit/"

# 0. Define the nuPlan data paths and the output data path.
NUPLAN_PATH=$HOME/nuplan/dataset/nuplan-v1.1/
NUPLAN_DB_PATH=$NUPLAN_PATH/splits/mini
NUPLAN_SENSOR_PATH=${NUPLAN_PATH}/sensor_blobs
NUPLAN_MAP_VERSION=nuplan-maps-v1.0
NUPLAN_MAPS_ROOT=${HOME}/nuplan/dataset/maps

OUT_DIR=/home/daniel/navsim_logs/mini

# 1. TODO: Generate train/val pickle.
python $NAVSIM_DEVKIT_ROOT/navsim/common/extraction/create_nuplan_data_with_vis.py \
    --nuplan-root-path ${NUPLAN_PATH} \
    --nuplan-db-path ${NUPLAN_DB_PATH} \
    --nuplan-sensor-path ${NUPLAN_SENSOR_PATH} \
    --nuplan-map-version ${NUPLAN_MAP_VERSION} \
    --nuplan-map-root ${NUPLAN_MAPS_ROOT} \
    --out-dir ${OUT_DIR} \
    --split mini \
    --thread-num 16

# 2. TODO: Extract Data Files from the generated pickle file.

# 3. TODO: Soft-link to some place similar to https://github.com/OpenDriveLab/OpenScene/blob/main/docs/dataset_stats.md#filesystem-hierarchy
```