OSVR Framework (Internal Development Docs)  0.6-1962-g59773924
Oculus_DK2.cpp
// Copyright 2015 Sensics, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "Oculus_DK2.h"
//#include <opencv2/core/operations.hpp>
#include <opencv2/imgproc/imgproc.hpp> // for image scaling

using namespace osvr;
using namespace oculus_dk2;

static const vrpn_uint16 OCULUS_VENDOR = 0x2833;
static const vrpn_uint16 DK2_PRODUCT = 0x0021;

Oculus_DK2_HID::Oculus_DK2_HID(double keepAliveSeconds)
    : vrpn_HidInterface(
          m_filter = new vrpn_HidProductAcceptor(OCULUS_VENDOR, DK2_PRODUCT)) {
    // Store keep-alive interval.
    m_keepAliveSeconds = keepAliveSeconds;

    // Send a command to turn on the LEDs and record the time at which
    // we did so.
    writeLEDControl();
    vrpn_gettimeofday(&m_lastKeepAlive, NULL);
}

Oculus_DK2_HID::~Oculus_DK2_HID() {
    // Turn off the LEDs.
    writeLEDControl(false);

    // Clean up our memory.
    delete m_filter;
}

std::vector<OCULUS_IMU_REPORT> Oculus_DK2_HID::poll() {
    // See if it has been long enough to send another keep-alive to
    // the LEDs.
    struct timeval now;
    vrpn_gettimeofday(&now, NULL);
    if (vrpn_TimevalDurationSeconds(now, m_lastKeepAlive) >=
        m_keepAliveSeconds) {
        writeKeepAlive();
        m_lastKeepAlive = now;
    }

    // Clear old reports, which will have already been returned.
    // Read and parse any available IMU reports from the DK2, which will put
    // them into the report vector.
    m_reports.clear();
    update();

    return m_reports;
}
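
// Illustrative usage sketch (not part of the original file): how a caller
// might drive poll() in its main loop. The keep-alive to the LEDs is sent
// from inside poll(), so the caller only has to call poll() more often than
// the keep-alive interval passed to the constructor. The interval value and
// the loop structure below are assumptions for illustration only.
static void examplePollLoop(bool &running) {
    Oculus_DK2_HID dk2(9.0); // assumed keep-alive interval, in seconds
    while (running) {
        std::vector<OCULUS_IMU_REPORT> reports = dk2.poll();
        // Hand each report in `reports` to whatever consumes them (e.g. a
        // tracker); the report fields are defined in Oculus_DK2.h.
    }
}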

// Thank you to Oliver Kreylos for the info needed to write this function.
// It is based on his OculusRiftHIDReports.cpp, used with permission.
void Oculus_DK2_HID::writeLEDControl(
    bool enable, vrpn_uint16 exposureLength, vrpn_uint16 frameInterval,
    vrpn_uint16 vSyncOffset, vrpn_uint8 dutyCycle, vrpn_uint8 pattern,
    bool autoIncrement, bool useCarrier, bool syncInput, bool vSyncLock,
    bool customPattern, vrpn_uint16 commandId) {
    // Buffer to store our report in.
    vrpn_uint8 pktBuffer[13];

    /* Pack the packet buffer, using little-endian packing: */
    vrpn_uint8 *bufptr = pktBuffer;
    vrpn_int32 buflen = sizeof(pktBuffer);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, vrpn_uint8(0x0cU));
    vrpn_buffer_to_little_endian(&bufptr, &buflen, commandId);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, pattern);
    vrpn_uint8 flags = 0x00U;
    if (enable) {
        flags |= 0x01U;
    }
    if (autoIncrement) {
        flags |= 0x02U;
    }
    if (useCarrier) {
        flags |= 0x04U;
    }
    if (syncInput) {
        flags |= 0x08U;
    }
    if (vSyncLock) {
        flags |= 0x10U;
    }
    if (customPattern) {
        flags |= 0x20U;
    }
    vrpn_buffer_to_little_endian(&bufptr, &buflen, flags);
    vrpn_buffer_to_little_endian(&bufptr, &buflen,
                                 vrpn_uint8(0x0cU)); // Reserved byte
    vrpn_buffer_to_little_endian(&bufptr, &buflen, exposureLength);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, frameInterval);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, vSyncOffset);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, dutyCycle);

    /* Write the LED control feature report: */
    send_feature_report(sizeof(pktBuffer), pktBuffer);
}
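
// Resulting LED-control feature report layout (derived from the packing
// above; all multi-byte fields are little-endian):
//   byte  0      report ID (0x0c)
//   bytes 1-2    commandId
//   byte  3      pattern
//   byte  4      flags (bit 0 enable, bit 1 autoIncrement, bit 2 useCarrier,
//                bit 3 syncInput, bit 4 vSyncLock, bit 5 customPattern)
//   byte  5      reserved (0x0c)
//   bytes 6-7    exposureLength
//   bytes 8-9    frameInterval
//   bytes 10-11  vSyncOffset
//   byte  12     dutyCycle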

// Thank you to Oliver Kreylos for the info needed to write this function.
// It is based on his OculusRiftHIDReports.cpp, used with permission.
void Oculus_DK2_HID::writeKeepAlive(bool keepLEDs, vrpn_uint16 interval,
                                    vrpn_uint16 commandId) {
    // Buffer to store our report in.
    vrpn_uint8 pktBuffer[6];

    /* Pack the packet buffer, using little-endian packing: */
    vrpn_uint8 *bufptr = pktBuffer;
    vrpn_int32 buflen = sizeof(pktBuffer);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, vrpn_uint8(0x11U));
    vrpn_buffer_to_little_endian(&bufptr, &buflen, commandId);
    vrpn_uint8 flags = keepLEDs ? 0x0bU : 0x01U;
    vrpn_buffer_to_little_endian(&bufptr, &buflen, flags);
    vrpn_buffer_to_little_endian(&bufptr, &buflen, interval);

    /* Write the keep-alive feature report: */
    send_feature_report(sizeof(pktBuffer), pktBuffer);
}
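
// Resulting keep-alive feature report layout (derived from the packing
// above; multi-byte fields are little-endian):
//   byte  0    report ID (0x11)
//   bytes 1-2  commandId
//   byte  3    flags (0x0b keeps the LEDs lit, 0x01 lets them turn off)
//   bytes 4-5  interval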

void Oculus_DK2_HID::on_data_received(size_t bytes, vrpn_uint8 *buffer) {
    // Fill new entries into the vector that will be passed back
    // on the next poll().
    // TODO: Read the values from the IMU and store them into the
    // vector that we'll return on the next call to poll().
    // XXX
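    // Sketch of the eventual shape of this parser (illustrative only; the
    // byte offsets and the report ID below are placeholders, NOT the real
    // DK2 IMU report format): assemble little-endian fields from `buffer`,
    // fill an OCULUS_IMU_REPORT, and append it to m_reports so that the
    // next call to poll() returns it.
    //
    //   if (bytes >= 3 && buffer[0] == 0x0b) { // hypothetical report ID
    //       vrpn_uint16 sampleCount =
    //           vrpn_uint16(buffer[1]) | (vrpn_uint16(buffer[2]) << 8);
    //       OCULUS_IMU_REPORT report;
    //       // ... decode accelerometer/gyro/magnetometer samples here ...
    //       m_reports.push_back(report);
    //   }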
}

cv::Mat osvr::oculus_dk2::unscramble_image(const cv::Mat &image) {
    // From the documentation: "Note OpenCV 1.x
    // functions cvRetrieveFrame and cv.RetrieveFrame return image
    // stored inside the video capturing structure. It is not
    // allowed to modify or release the image! You can copy
    // the frame using cvCloneImage() and then do whatever
    // you want with the copy." (This comes from the web page:
    // http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html)

    // From http://doc-ok.org/?p=1095
    // "It advertises itself as having a resolution of 376×480
    // pixels, and a YUYV pixel format (downsampled and interleaved
    // luminance/chroma channels, typical for webcams). In reality,
    // the camera has a resolution of 752×480 pixels, and uses a
    // simple Y8 greyscale pixel format."
    // From http://www.fourcc.org/yuv.php
    // "...most popular of the various YUV 4:2:2 formats.
    // Horizontal sample period for Y = 1, V = 2, U = 2.
    // Macropixel = 2 image pixels. U0Y0V0Y1"
    // This seems inconsistent. From
    // http://www.digitalpreservation.gov/formats/fdd/fdd000365.shtml
    // "Byte 0=8-bit Cb; Byte 1=8-bit Y'0", which seems to
    // say that the first byte is used to determine color
    // and the second to determine luminance, so we should
    // convert color back into another luminance. Every
    // other byte is a color byte (half of them Cb and half
    // of them Cr).
    // NOTE: We'd like not to have to try and invert the bogus
    // transformation to get back to two luminance channels, but
    // rather just tell the camera to change its decoder. It turns
    // out that set(CV_CAP_PROP_FOURCC) is not actually implemented
    // in OpenCV (see cap_unicap.cpp), so we can't do that directly.
    // NOTE: OpenCV uses FFMPEG, which is able to read from cameras,
    // so it may be using FFMPEG to capture data from cameras. If so,
    // that is the driver to adjust.
    // NOTE: https://trac.ffmpeg.org/wiki/DirectShow talks about how
    // to get FFMPEG to tell you what formats each camera can produce
    // on Windows. It lists only YUV for this camera.
    // NOTE: http://www.equasys.de/colorconversion.html provides
    // color conversion matrices to transform between RGB and other
    // color formats (including YCbCr); there are several different
    // matrices depending on intent.
    // NOTE: The FFMPEG file libavfilter/vf_colormatrix.c contains the
    // same coefficients as one of the conversions listed there, which
    // has the following conversion description:
    //   |Y |   |  0|   | 0.299  0.587  0.114|   |R|
    //   |Cb| = |128| + |-0.169 -0.331  0.500| . |G|
    //   |Cr|   |128|   | 0.500 -0.419 -0.081|   |B|
    //
    //   |R|   |1.000  0.000  1.400|   |   Y    |
    //   |G| = |1.000 -0.343 -0.711| . |Cb - 128|
    //   |B|   |1.000  1.765  0.000|   |Cr - 128|
    // but the code uses coefficients with more resolution (and
    // stored in a different order):
    //   { { +0.5870, +0.1140, +0.2990 }, // Rec.601 (ITU-R BT.470-2/SMPTE 170M) (2)
    //     { -0.3313, +0.5000, -0.1687 },
    //     { -0.4187, -0.0813, +0.5000 } }
    // (but this is only one of four choices, choice 2 of 0-3).
    // These are used in filter_frame(), which switches based on
    // the color space of the source and destination. The FFMPEG
    // output on the DK2 does not specify the color space, just the
    // encoding format.

    // Okay, so here we convert from BGR back into YUV.
    cv::Mat yuvImage;
    cv::cvtColor(image, yuvImage, cv::COLOR_BGR2YCrCb);

    // Then we repack the individual components; every Y is used
    // (second and fourth entry), but Cb is the first entry of
    // four and Cr the third: Cb0 Y0 Cr0 Y1 Cb2 Y2 Cr2 Y3.
    // So the image itself has interpolated Cb and Cr values...
    // For now, we do a brain-dead conversion, where we double the width
    // of the YUV image, make it grayscale, and copy the Y channel from
    // the input image into neighboring pixels in the output image,
    // doubling every one.
    // TODO: Invert the transformation used to get from YUV to BGR and
    // determine the actual components, which are in fact a set of greyscale
    // values.
    cv::Mat outImage(yuvImage.rows, yuvImage.cols * 2, CV_8UC1, cv::Scalar(0));
    for (int r = 0; r < yuvImage.rows; r++) {
        for (int c = 0; c < yuvImage.cols; c++) {
            outImage.at<unsigned char>(r, c * 2) =
                yuvImage.at<cv::Vec3b>(r, c)[0];
            outImage.at<unsigned char>(r, c * 2 + 1) =
                yuvImage.at<cv::Vec3b>(r, c)[0];
        }
    }

    return outImage;
}
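
// Illustrative sketch (not part of the original file): a slightly fuller
// reconstruction along the lines of the TODO above. It assumes the
// macropixel layout described in the comments (Cb0 Y0 Cr0 Y1), i.e. even
// source columns carried a Cb byte and odd source columns carried a Cr
// byte, and it assumes the decoder's YUV -> BGR -> YCrCb round trip
// roughly preserves those byte values. The function name is hypothetical.
static cv::Mat unscramble_image_sketch(const cv::Mat &image) {
    cv::Mat yuvImage;
    cv::cvtColor(image, yuvImage, cv::COLOR_BGR2YCrCb); // channels: Y, Cr, Cb

    cv::Mat outImage(yuvImage.rows, yuvImage.cols * 2, CV_8UC1, cv::Scalar(0));
    for (int r = 0; r < yuvImage.rows; r++) {
        for (int c = 0; c < yuvImage.cols; c++) {
            const cv::Vec3b px = yuvImage.at<cv::Vec3b>(r, c);
            // Even output byte: the chroma slot this column occupied in the
            // macropixel (Cb for even columns, Cr for odd columns).
            outImage.at<unsigned char>(r, c * 2) = (c % 2 == 0) ? px[2] : px[1];
            // Odd output byte: the luminance slot.
            outImage.at<unsigned char>(r, c * 2 + 1) = px[0];
        }
    }
    return outImage;
}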