"""
CHIP Dataset Usage Example

This script demonstrates how to load and visualize data from the CHIP dataset,
including RGB images, depth maps, camera parameters, and 3D object models.

Requirements:
    pip install datasets huggingface_hub numpy opencv-python open3d torch
"""

import json
import os
import gc
from typing import Tuple, Optional

import numpy as np
import cv2
import open3d as o3d
import torch
from datasets import load_dataset, get_dataset_infos
from huggingface_hub import snapshot_download


def lift_point_cloud(
    depth: torch.Tensor,
    camera_intrinsics: torch.Tensor,
    xy_indices: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
) -> torch.Tensor:
    """
    Lift a depth image to a 3D point cloud using camera intrinsics.

    Args:
        depth: Depth image tensor of shape (H, W, C) where C >= 1.
            If C > 1, channels 1+ are treated as features (e.g., RGB).
        camera_intrinsics: Flattened camera intrinsic matrix
            [fx, 0, cx, 0, fy, cy, 0, 0, 1].
        xy_indices: Optional tuple of (x_coords, y_coords) to lift only
            specific pixels.

    Returns:
        Point cloud tensor of shape (N, 3+F) where F is the number of feature
        channels. First 3 columns are XYZ coordinates, remaining columns are
        features.
    """
    H, W, num_channels = depth.shape
    depth_values = depth[:, :, 0]

    if xy_indices is not None:
        # Lift only the requested pixels.
        x_coords, y_coords = xy_indices
        x_coords = x_coords.to(depth_values.device).float()
        y_coords = y_coords.to(depth_values.device).float()
        z_coords = depth_values[y_coords.long(), x_coords.long()]
    else:
        # Lift every pixel: build a full pixel-coordinate grid.
        x_grid, y_grid = np.meshgrid(
            np.arange(W, dtype=np.float32),
            np.arange(H, dtype=np.float32),
            indexing='xy'
        )
        x_coords = torch.from_numpy(x_grid).flatten().to(depth_values.device)
        y_coords = torch.from_numpy(y_grid).flatten().to(depth_values.device)
        z_coords = depth_values.flatten()

    # Focal lengths and principal point from the flattened 3x3 intrinsics.
    fx, fy = camera_intrinsics[0], camera_intrinsics[4]
    cx, cy = camera_intrinsics[2], camera_intrinsics[5]

    # Pinhole model: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy.
    x_3d = (x_coords - cx) * z_coords / fx
    y_3d = (y_coords - cy) * z_coords / fy
    points_3d = torch.stack([x_3d, y_3d, z_coords], dim=1)

    # Carry any extra channels (e.g., RGB) along as per-point features.
    if num_channels > 1:
        features = depth[y_coords.long(), x_coords.long(), 1:]
        if xy_indices is None:
            features = features.reshape(H * W, num_channels - 1)
        points_3d = torch.cat([points_3d, features], dim=1)

    return points_3d
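
# Example usage of lift_point_cloud (a minimal sketch with synthetic values,
# not CHIP data; the 640x480 resolution and the f=500, c=(320, 240)
# intrinsics below are illustrative assumptions):
#
#   depth = torch.ones(480, 640, 1)                       # flat plane at Z = 1
#   K = torch.tensor([500.0, 0, 320, 0, 500, 240, 0, 0, 1])
#   points = lift_point_cloud(depth, K)                   # shape (480 * 640, 3)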

def back_project_rgbd(
    rgb: np.ndarray,
    depth: np.ndarray,
    camera_intrinsics: np.ndarray
) -> torch.Tensor:
    """
    Back-project an RGB-D image to a colored point cloud.

    Args:
        rgb: RGB image array of shape (H, W, 3).
        depth: Depth map array of shape (H, W) with values in meters.
        camera_intrinsics: Flattened 3x3 camera intrinsic matrix.

    Returns:
        Point cloud tensor of shape (N, 6) with XYZ and RGB columns.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Keep only pixels with a valid (non-zero) depth measurement.
    valid_rows, valid_cols = np.where(depth > 0)
    xy_indices = torch.tensor(
        np.stack([valid_cols, valid_rows]),
        dtype=torch.long,
        device=device
    )

    # Stack depth and RGB into one (H, W, 4) tensor so the RGB channels are
    # lifted alongside depth as per-point features.
    depth_rgb = torch.cat([
        torch.from_numpy(depth).unsqueeze(-1),
        torch.from_numpy(rgb)
    ], dim=2).to(device)

    # Lift the valid pixels to 3D with the shared point-cloud helper.
    camera_tensor = torch.from_numpy(camera_intrinsics).to(device)
    point_cloud = lift_point_cloud(depth_rgb, camera_tensor, tuple(xy_indices))

    return point_cloud
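
# Example usage of back_project_rgbd (a minimal sketch with random synthetic
# images, mirroring how the visualization loop below calls it; resolution and
# intrinsics are illustrative assumptions):
#
#   rgb = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   depth = np.full((480, 640), 0.5, dtype=np.float32)    # 0.5 m everywhere
#   K = np.array([500.0, 0, 320, 0, 500, 240, 0, 0, 1])
#   cloud = back_project_rgbd(rgb, depth, K)              # shape (N, 6)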

def visualize_chip_sample(
    repo_id: str = "FBK-TeV/CHIP",
    target_dir: str = "./chip_data",
    num_samples: int = 1,
    show_2d: bool = False
) -> None:
    """
    Load and visualize samples from the CHIP dataset.

    Args:
        repo_id: Hugging Face dataset repository ID.
        target_dir: Local directory to store downloaded model files.
        num_samples: Number of samples to visualize.
        show_2d: If True, display RGB and depth images in OpenCV windows.
            If False, only show the 3D point cloud visualization.
    """
    # Print the dataset metadata.
    info = get_dataset_infos(repo_id)
    print(f"Dataset info: {info}\n")

    # Download only the 3D object models (PLY meshes) from the repository.
    print("Downloading 3D models...")
    local_path = snapshot_download(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=target_dir,
        allow_patterns=["models/*"]
    )
    print(f"Models downloaded to: {local_path}\n")

    # Stream the dataset so samples are fetched on demand instead of
    # downloading everything up front.
    dataset = load_dataset(repo_id, streaming=True)

    for idx, example in enumerate(dataset['test'].take(num_samples)):
        print(f"Processing sample {idx + 1}/{num_samples}...")

        # Decode the RGB image and convert to BGR for OpenCV display.
        rgb_image = np.array(example['image'])
        rgb_bgr = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)

        # Decode the 16-bit depth map.
        depth_map = np.array(example['depth'], dtype=np.uint16).astype(np.float32)

        # Normalize the depth map to 8 bits for display purposes only.
        depth_vis = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX)
        depth_vis = depth_vis.astype(np.uint8)

        # Parse the camera intrinsics and the raw-depth-to-metric scale factor.
        camera_params = json.loads(example['camera_params'])
        intrinsics_matrix = np.array(camera_params['cam_K']).reshape(3, 3)
        depth_scale = camera_params['depth_scale']

        print(f"Camera intrinsics:\n{intrinsics_matrix}")
        print(f"Depth scale: {depth_scale}")

        # Parse the object pose annotations; visualize the first labeled object.
        labels = json.loads(example['labels'])
        label = labels[0]

        rotation_matrix = np.array(label['cam_R_m2c_flat']).reshape(3, 3)
        translation_vector = np.array(label['cam_t_m2c'])
        bbox = [
            label['bbox_x'],
            label['bbox_y'],
            label['bbox_width'],
            label['bbox_height']
        ]

        print(f"\nObject ID: {label['obj_id']}")
        print(f"Rotation matrix:\n{rotation_matrix}")
        print(f"Translation vector: {translation_vector}")
        print(f"Bounding box (x, y, w, h): {bbox}\n")

        # Optionally show the 2D views with the object's bounding box drawn in.
        if show_2d:
            # cv2.rectangle expects integer pixel coordinates.
            x, y, w, h = (int(v) for v in bbox)
            rgb_with_bbox = rgb_bgr.copy()
            cv2.rectangle(rgb_with_bbox, (x, y), (x + w, y + h), (0, 255, 0), 2)

            cv2.imshow("RGB Image", rgb_with_bbox)
            cv2.imshow("Depth Map", depth_vis)
            print("Displaying 2D images (press any key to continue to 3D)...")
            cv2.waitKey(0)

        # Convert raw depth to metric units, then back-project the RGB-D pair
        # to a colored point cloud.
        depth_metric = depth_map * depth_scale
        point_cloud = back_project_rgbd(rgb_image, depth_metric, intrinsics_matrix.flatten())

        # Build an Open3D point cloud from the XYZ (+ optional RGB) columns.
        pcd_o3d = o3d.geometry.PointCloud()
        pcd_o3d.points = o3d.utility.Vector3dVector(point_cloud[:, :3].cpu().numpy())
        if point_cloud.shape[1] > 3:
            pcd_o3d.colors = o3d.utility.Vector3dVector(
                point_cloud[:, 3:].cpu().numpy() / 255.0
            )

        # Load the object's 3D model and paint it red so it stands out.
        model_path = os.path.join(target_dir, "models", f"obj_{label['obj_id']:06d}.ply")
        model_mesh = o3d.io.read_triangle_mesh(model_path)
        model_mesh.paint_uniform_color([1.0, 0.0, 0.0])

        # Build the 4x4 homogeneous model-to-camera transform from the
        # annotated rotation and translation, then apply it to the mesh.
        pose_matrix = np.eye(4)
        pose_matrix[:3, :3] = rotation_matrix
        pose_matrix[:3, 3] = translation_vector
        model_mesh.transform(pose_matrix)

        # Show the scene point cloud together with the posed model.
        print("Displaying 3D visualization (close window to continue)...")
        o3d.visualization.draw_geometries(
            [pcd_o3d, model_mesh],
            window_name=f"CHIP Sample {idx + 1}: Scene + Model (Red)"
        )

        if show_2d:
            cv2.destroyAllWindows()
        else:
            # Give OpenCV's event loop a moment to process pending events.
            cv2.waitKey(100)

    # Release the streaming dataset handle.
    del dataset
    gc.collect()
    print("\nVisualization complete!")

if __name__ == "__main__":
    # Run the demo: download the models, stream one sample, and show both the
    # 2D and 3D visualizations.
    visualize_chip_sample(
        repo_id="FBK-TeV/CHIP",
        target_dir="./chip_data",
        num_samples=1,
        show_2d=True
    )