// calibration_2_cameras.cc
#include <unistd.h>
#include <cmath>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <visionloc.hh>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

/*
 * This routine helps calibrate two cameras whose fields of view overlap.
 * Place two markers in the overlapping area; the program detects them with
 * both cameras and prints the world-coordinate parameters for the second
 * camera, to be entered in camerasInfo.xml.
 */
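/*
 * Example of the expected output (the camera ids and numbers below are purely
 * illustrative):
 *
 *   Cam 1: 1280x720
 *   Cam 2: 1280x720
 *   WCHeight: 250
 *   WCOffX: 120.5
 *   WCOffy: -34.2
 *   WCOffAngle: 1.571
 *
 * WCOffAngle is in radians (it comes from atan2); WCHeight, WCOffX and WCOffy
 * are in the same world units (e.g. cm) used for the first camera's markers.
 */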
/* Per-camera state: visionloc id, resolution, display window and last frame. */
struct cam_properties {
    int id;
    int width;
    int height;
    std::string window;
    cv::Mat frame;
};
int main(void)
{
    std::vector<Marker> markers[2];

    start_visionloc();

    std::vector<int> cam_ids = get_active_cameras();
    if (cam_ids.size() != 2) {
        std::cerr << "2 cameras needed, " << cam_ids.size() << " active" << std::endl;
        stop_visionloc();
        return -1;
    }

    struct cam_properties cams[2];
    for (int n = 0; n < 2; n++) {
        cams[n].id = cam_ids[n];
        cams[n].width = get_cam_width(cams[n].id);
        cams[n].height = get_cam_height(cams[n].id);

        std::ostringstream oss;
        oss << "Cam" << cams[n].id;
        cams[n].window = oss.str();
        cv::namedWindow(cams[n].window, CV_WINDOW_AUTOSIZE);

        std::cout << " Cam " << cams[n].id << ": " <<
            cams[n].width << "x" <<
            cams[n].height << std::endl;

        set_expected_num_of_markers(cams[n].id, 2);
    }
    /* Grab frames until both cameras report exactly two markers (up to 10 attempts). */
    for (int i = 0; i < 10; i++) {
        for (int n = 0; n < 2; n++) {
            cv::Mat *grayframe = get_frame_from_camera(cams[n].id);
            cvtColor(*grayframe, cams[n].frame, CV_GRAY2RGB);
            delete grayframe;

            markers[n] = get_markers_from_camera(cams[n].id);

            /* Overlay each detected marker id on the frame for visual feedback. */
            for (std::vector<Marker>::iterator itm = markers[n].begin();
                 itm != markers[n].end(); ++itm) {
                std::ostringstream oss;
                oss << (int)(itm->id);
                cv::putText(cams[n].frame,
                            oss.str(),
                            cv::Point(itm->cam_center_posX, cams[n].height - itm->cam_center_posY),
                            cv::FONT_HERSHEY_PLAIN,
                            5,
                            cv::Scalar(0, 255, 0),
                            5);
            }
            cv::imshow(cams[n].window, cams[n].frame);
        }

        if (markers[0].size() == 2 && markers[1].size() == 2) {
            /* Both cameras see two markers; their ids are matched below. */
            break;
        }

        if (cv::waitKey(30) == 27) {    /* ESC aborts */
            stop_visionloc();
            return 0;
        }

        sleep(1);
    }

    stop_visionloc();

    /* Without two markers visible on both cameras there is nothing to calibrate. */
    if (markers[0].size() != 2 || markers[1].size() != 2) {
        std::cerr << "Could not detect two markers on both cameras" << std::endl;
        return -1;
    }
    /*
     * Match the markers by id: Pidx/Qidx are the indices in camera B's list
     * that correspond to markers[0][0] and markers[0][1] on camera A.
     */
    int Pidx, Qidx;
    if (markers[1][0].id == markers[0][0].id) {
        Pidx = 0;
    } else if (markers[1][1].id == markers[0][0].id) {
        Pidx = 1;
    } else {
        std::cerr << "Marker " << (int)(markers[0][0].id) << " not found on camera B" << std::endl;
        std::cerr << "Camera A markers: " << (int)(markers[0][0].id) << ", " << (int)(markers[0][1].id) << std::endl;
        std::cerr << "Camera B markers: " << (int)(markers[1][0].id) << ", " << (int)(markers[1][1].id) << std::endl;
        return -1;
    }
    Qidx = Pidx ^ 1;
    if (markers[1][Qidx].id != markers[0][1].id) {
        std::cerr << "Marker " << (int)(markers[0][1].id) << " not found on camera B" << std::endl;
        std::cerr << "Camera A markers: " << (int)(markers[0][0].id) << ", " << (int)(markers[0][1].id) << std::endl;
        std::cerr << "Camera B markers: " << (int)(markers[1][0].id) << ", " << (int)(markers[1][1].id) << std::endl;
        return -1;
    }
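    /*
     * Derive camera B's world-coordinate parameters from the two matched
     * markers.  Camera A reports them in world coordinates (P_A, Q_A),
     * camera B in pixel coordinates (P_B, Q_B):
     *
     *   angle  = aB - aA                      difference of the P->Q directions
     *   resB   = |Q_B - P_B| / |Q_A - P_A|    pixels per world unit
     *   offset = P_A - R(angle) * P_B / resB
     *
     * so that R(angle) * P_B / resB + offset lands exactly on P_A.
     */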
    double PAx = markers[0][0].wc_center_posX;
    double PAy = markers[0][0].wc_center_posY;
    double QAx = markers[0][1].wc_center_posX;
    double QAy = markers[0][1].wc_center_posY;
    double PBx = markers[1][Pidx].cam_center_posX;
    double PBy = markers[1][Pidx].cam_center_posY;
    double QBx = markers[1][Qidx].cam_center_posX;
    double QBy = markers[1][Qidx].cam_center_posY;

    double aA = atan2(QAy - PAy, QAx - PAx);
    double aB = atan2(QBy - PBy, QBx - PBx);
    double lAcm = sqrt((PAx - QAx) * (PAx - QAx) + (PAy - QAy) * (PAy - QAy));
    double lBpx = sqrt((PBx - QBx) * (PBx - QBx) + (PBy - QBy) * (PBy - QBy));

    double angle = aB - aA;
    double resB = lBpx / lAcm;
    double offset_x = PAx - (cos(angle) * PBx / resB - sin(angle) * PBy / resB);
    double offset_y = PAy - (sin(angle) * PBx / resB + cos(angle) * PBy / resB);

    std::cout << "WCHeight: " << cams[1].height / resB << std::endl;
    std::cout << "WCOffX: " << offset_x << std::endl;
    std::cout << "WCOffy: " << offset_y << std::endl;
    std::cout << "WCOffAngle: " << angle << std::endl;
}
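
/*
 * A minimal sketch, not called above, of how the printed parameters might be
 * applied afterwards, assuming the same convention as the computation in
 * main(): world = R(WCOffAngle) * pixel / res + (WCOffX, WCOffy), where
 * res = image_height / WCHeight is camera B's resolution in pixels per world
 * unit.  The function name and signature are illustrative, not part of the
 * visionloc API.
 */
void pixel_to_world(double px, double py, double res, double angle,
                    double off_x, double off_y, double &wx, double &wy)
{
    /* Rotate, scale from pixels to world units, then translate. */
    wx = cos(angle) * px / res - sin(angle) * py / res + off_x;
    wy = sin(angle) * px / res + cos(angle) * py / res + off_y;
}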