-
Notifications
You must be signed in to change notification settings - Fork 0
/
Plane_estimation.py
148 lines (106 loc) · 4.09 KB
/
Plane_estimation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
#!/usr/bin/env python
# coding: utf-8
import glob
import time
import cv2
import cv2 as cv
import numpy as np
import pypuclib
from pypuclib import (PUC_DATA_MODE, Camera, CameraFactory, Decoder,
PUCException, Resolution, XferData)
class plane_estimation:
    """Estimate the pose of a chessboard plane seen by a PUC camera.

    Grabs frames from a Photron PUC camera, detects a chessboard, solves
    for the board's extrinsic pose (rvecs/tvecs) with solvePnP, overlays a
    virtual cube for visual feedback, and saves the last pose on exit.

    NOTE(review): class name kept lowercase for backward compatibility
    with existing callers (PEP 8 would prefer PlaneEstimation).
    """

    def __init__(self):
        # Decode region of interest (offset + size of the sensor window).
        self.x = 0
        self.y = 0
        self.w = 1246
        self.h = 624
        self.square_size = 1.5  # side length of one chessboard square [cm]
        self.pattern_size = (3, 4)  # number of inner corners (not squares)
        # 3D object points of the board corners on the z = 0 plane,
        # scaled to physical units.
        self.pattern_points = np.zeros(
            (np.prod(self.pattern_size), 3), np.float64
        )
        self.pattern_points[:, :2] = np.indices(self.pattern_size).T.reshape(
            -1, 2
        )
        self.pattern_points *= self.square_size
        # Virtual cube drawn on the image to visualize the estimated pose.
        self.cube = np.float64(
            [
                [0, 0, 0],
                [0, 3, 0],
                [3, 3, 0],
                [3, 0, 0],
                [0, 0, -3],
                [0, 3, -3],
                [3, 3, -3],
                [3, 0, -3],
            ]
        )
        # Camera intrinsics and distortion from a prior calibration run.
        self.mtx = np.load("../Calibration/data/A/mtx.npy")
        self.dist = np.load("../Calibration/data/A/dist.npy")

    def save_plane_coordinate(self):
        """Run the capture loop; on ESC, save the last estimated pose.

        Saves rvecs/tvecs to data/plane/*.npy.

        Raises:
            RuntimeError: if ESC is pressed before any chessboard was
                detected (the original code hit UnboundLocalError here).
        """
        cam = CameraFactory().create()
        cam.setFramerateShutter(125, 125)
        decoder = cam.decoder()
        # Bug fix: rvecs/tvecs were only bound inside the detection branch,
        # so quitting before the first detection crashed at np.save below.
        rvecs = tvecs = None
        loop = 0
        while True:
            xferData = cam.grab()
            # Decode the transferred data so it can be used as an image.
            frame = decoder.decode(xferData, self.x, self.y, self.w, self.h)
            # Show the raw captured frame.
            cv2.imshow("captured image", frame)
            c_frame = np.copy(frame)
            ret, corners = cv.findChessboardCorners(
                frame, self.pattern_size, None
            )
            if ret:  # fixed: avoid `== True` comparison
                # Refine corner coordinates to sub-pixel accuracy.
                criteria = (
                    cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT,
                    100,
                    0.001,
                )
                corners = cv.cornerSubPix(
                    frame, corners, (5, 5), (-1, -1), criteria
                )
                # Estimate extrinsic parameters only (pose of the board).
                err, rvecs, tvecs = cv.solvePnP(
                    self.pattern_points, corners, self.mtx, self.dist
                )
                # Project the virtual cube into the image to visualize the
                # estimated pose.
                imgpts, jac = cv.projectPoints(
                    self.cube, rvecs, tvecs, self.mtx, self.dist
                )
                frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
                c_frame = self.draw(frame, imgpts)
                if loop % 100 == 0:
                    # Homogeneous transform from camera to the work plane.
                    T_ext = np.zeros((4, 4), np.float64)
                    R_ext, jac = cv2.Rodrigues(rvecs)
                    T_ext[0:3, 0:3] = R_ext
                    T_ext[0:3, 3:4] = tvecs[0:3]
                    T_ext[3][3] = 1.0
                    print("T\n", T_ext)
            cv2.imshow("plane coordinate", c_frame)
            k = cv.waitKey(1)
            loop += 1
            if k == 27:  # ESC ends the capture loop
                break
        cv2.destroyAllWindows()
        if rvecs is None or tvecs is None:
            raise RuntimeError(
                "No chessboard pose was estimated; nothing to save."
            )
        np.save("data/plane/rvecs_plane", rvecs)
        np.save("data/plane/tvecs_plane", tvecs)

    def draw(self, img, imgpts):
        """Draw the projected cube (8 image points) onto img and return it."""
        imgpts = np.int32(imgpts).reshape(-1, 2)
        # Draw the ground face filled in green (negative thickness = fill).
        img = cv.drawContours(img, [imgpts[:4]], -1, (0, 255, 0), -3)
        # Draw the four pillars in blue. Fixed: `(255)` is just the int
        # 255, which OpenCV expands to (255, 0, 0, 0); write the BGR blue
        # tuple explicitly so the code matches the intent.
        for i, j in zip(range(4), range(4, 8)):
            img = cv.line(
                img, tuple(imgpts[i]), tuple(imgpts[j]), (255, 0, 0), 3
            )
        # Draw the top face outline in red.
        img = cv.drawContours(img, [imgpts[4:]], -1, (0, 0, 255), 3)
        return img
def main():
    """Entry point: run the interactive plane-pose estimation loop."""
    estimator = plane_estimation()
    estimator.save_plane_coordinate()


if __name__ == "__main__":
    main()