-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathkinect_wrapper.py
More file actions
executable file
·79 lines (69 loc) · 2.43 KB
/
kinect_wrapper.py
File metadata and controls
executable file
·79 lines (69 loc) · 2.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import open3d as o3d
import json
from capture import Capture
import numpy as np
import os
def _get_intrinsics(config):
    """Probe the attached Azure Kinect for its camera intrinsics.

    Records a single frame to a temporary MKV file, reads the file's
    metadata back, and extracts the pinhole intrinsic parameters from it.
    The temporary files ('tmp.mkv', 'tmp.json') are removed afterwards.

    Parameters
    ----------
    config : o3d.io.AzureKinectSensorConfig
        Sensor configuration used to open the recorder.

    Returns
    -------
    o3d.camera.PinholeCameraIntrinsic
        Intrinsics (width, height, fx, fy, cx, cy) of the color camera.
    """
    recorder = o3d.io.AzureKinectRecorder(config, 0)
    recorder.init_sensor()
    recorder.open_record('tmp.mkv')
    try:
        # record_frame returns None until the sensor delivers a frame.
        f = None
        while f is None:
            f = recorder.record_frame(True, True)
    finally:
        # Always close the recording, even if record_frame raises,
        # so the mkv file is not left locked/truncated.
        recorder.close_record()
    del recorder
    reader = o3d.io.AzureKinectMKVReader()
    reader.open('tmp.mkv')
    metadata = reader.get_metadata()
    # Close the reader before os.remove below; on Windows deleting a
    # file with an open handle fails.
    reader.close()
    o3d.io.write_azure_kinect_mkv_metadata('tmp.json', metadata)
    # Use a context manager so the JSON handle is not leaked
    # (the original opened it and never closed it).
    with open('tmp.json') as fp:
        info = json.load(fp)
    # Open3D serializes the 3x3 intrinsic matrix column-major:
    # [fx 0 0, 0 fy 0, cx cy 1] -> indices 0, 4, 6, 7.
    mat = info['intrinsic_matrix']
    fx, fy, cx, cy = mat[0], mat[4], mat[6], mat[7]
    height = info['height']
    width = info['width']
    os.remove('tmp.mkv')
    os.remove('tmp.json')
    return o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
def _to_rgb(bgr):
b = np.copy(bgr[:, 0])
r = bgr[:, 2]
bgr[:, 0] = r
bgr[:, 2] = b
return bgr
class KinectWrapper(object):
    """Wrapper around an Azure Kinect device producing captures and
    colored point clouds.

    NOTE(review): this class mixes two APIs. __init__ and get_capture
    use Open3D's AzureKinectSensor, but get_verts_and_colors calls
    self.ka.get_capture() (not an Open3D sensor method) and the bare,
    undefined name `ka` -- those calls match the pyk4a API and will
    raise (AttributeError/NameError) as written. Confirm which backend
    is intended before relying on get_verts_and_colors/get_point_cloud.
    """

    def __init__(self):
        # Highest color resolution, unbinned narrow-FOV depth, 15 fps,
        # deliver only synchronized color+depth pairs.
        self.config = o3d.io.AzureKinectSensorConfig(
            {
                'color_resolution': 'K4A_COLOR_RESOLUTION_3072P',
                'depth_mode': 'K4A_DEPTH_MODE_NFOV_UNBINNED',
                'synchronized_images_only': 'true',
                'camera_fps': 'K4A_FRAMES_PER_SECOND_15'
            }
        )
        # Despite the name, this is the inverse *intrinsic* matrix
        # (pixel -> normalized camera ray), not a camera-to-world pose.
        self.cam_to_world = np.linalg.inv(_get_intrinsics(self.config).intrinsic_matrix)
        self.ka = o3d.io.AzureKinectSensor(self.config)
        # Open device index 0.
        self.ka.connect(0)

    def get_capture(self):
        """Block until the sensor delivers a frame, then wrap it in the
        project's Capture type with a copy of the inverse intrinsics."""
        f = None
        # capture_frame(True) returns None until a frame is available.
        while f is None:
            f = self.ka.capture_frame(True)
        return Capture.create_from_kinect_capture(f, np.copy(self.cam_to_world))

    def get_verts_and_colors(self):
        """Return (points, colors) arrays for the current frame.

        NOTE(review): `self.ka.get_capture()` and the bare name `ka`
        below are pyk4a-style calls (Capture.depth_point_cloud,
        pyk4a.color_image_to_depth_camera) that are not defined in this
        file -- this method fails at runtime as written. TODO: confirm
        the intended backend and fix accordingly.
        """
        cap = self.ka.get_capture()
        # Spin until both depth and color images contain data.
        while not (np.any(cap.depth) and np.any(cap.color)):
            cap = self.ka.get_capture()
        points = cap.depth_point_cloud.reshape(-1, 3)
        # BGRA color image remapped into the depth camera's frame.
        colors = ka.color_image_to_depth_camera(cap.color, cap.depth, self.ka.calibration, True)
        colors = colors.reshape(-1, 4)
        # Swap BGR -> RGB in place, scale 0-255 bytes into 0-1 floats.
        colors = _to_rgb(colors)*(1.0/255)
        # 0.05 converts depth units into the app's world units --
        # presumably mm -> 5 cm units; verify against callers.
        return points*0.05, colors

    def get_point_cloud(self):
        """Build an Open3D PointCloud from get_verts_and_colors()."""
        points, colors = self.get_verts_and_colors()
        pc = o3d.geometry.PointCloud(
            o3d.utility.Vector3dVector(
                points
            )
        )
        # Drop the alpha channel: PointCloud colors are RGB only.
        pc.colors = o3d.utility.Vector3dVector(colors[:,:3])
        return pc