• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "RenderTopView.h"
18 
19 #include "VideoTex.h"
20 #include "glError.h"
21 #include "shader.h"
22 #include "shader_projectedTex.h"
23 #include "shader_simpleTex.h"
24 
25 #include <android-base/logging.h>
26 #include <math/mat4.h>
27 #include <math/vec3.h>
28 
29 namespace {
30 
31 using aidl::android::hardware::automotive::evs::BufferDesc;
32 using aidl::android::hardware::automotive::evs::IEvsEnumerator;
33 
34 // Simple aliases to make geometric math using vectors more readable
35 const unsigned X = 0;
36 const unsigned Y = 1;
37 const unsigned Z = 2;
38 
39 // Since we assume no roll in these views, we can simplify the required math
unitVectorFromPitchAndYaw(float pitch,float yaw)40 android::vec3 unitVectorFromPitchAndYaw(float pitch, float yaw) {
41     float sinPitch, cosPitch;
42     sincosf(pitch, &sinPitch, &cosPitch);
43     float sinYaw, cosYaw;
44     sincosf(yaw, &sinYaw, &cosYaw);
45     return android::vec3(cosPitch * -sinYaw, cosPitch * cosYaw, sinPitch);
46 }
47 
48 // Helper function to set up a perspective matrix with independent horizontal and vertical
49 // angles of view.
perspective(float hfov,float vfov,float near,float far)50 android::mat4 perspective(float hfov, float vfov, float near, float far) {
51     const float tanHalfFovX = tanf(hfov * 0.5f);
52     const float tanHalfFovY = tanf(vfov * 0.5f);
53 
54     android::mat4 p(0.0f);
55     p[0][0] = 1.0f / tanHalfFovX;
56     p[1][1] = 1.0f / tanHalfFovY;
57     p[2][2] = -(far + near) / (far - near);
58     p[2][3] = -1.0f;
59     p[3][2] = -(2.0f * far * near) / (far - near);
60     return p;
61 }
62 
63 // Helper function to set up a view matrix for a camera given it's yaw & pitch & location
64 // Yes, with a bit of work, we could use lookAt, but it does a lot of extra work
65 // internally that we can short cut.
cameraLookMatrix(const ConfigManager::CameraInfo & cam)66 android::mat4 cameraLookMatrix(const ConfigManager::CameraInfo& cam) {
67     float sinYaw, cosYaw;
68     sincosf(cam.yaw, &sinYaw, &cosYaw);
69 
70     // Construct principal unit vectors
71     android::vec3 vAt = unitVectorFromPitchAndYaw(cam.pitch, cam.yaw);
72     android::vec3 vRt = android::vec3(cosYaw, sinYaw, 0.0f);
73     android::vec3 vUp = -cross(vAt, vRt);
74     android::vec3 eye = android::vec3(cam.position[X], cam.position[Y], cam.position[Z]);
75 
76     android::mat4 Result(1.0f);
77     Result[0][0] = vRt.x;
78     Result[1][0] = vRt.y;
79     Result[2][0] = vRt.z;
80     Result[0][1] = vUp.x;
81     Result[1][1] = vUp.y;
82     Result[2][1] = vUp.z;
83     Result[0][2] = -vAt.x;
84     Result[1][2] = -vAt.y;
85     Result[2][2] = -vAt.z;
86     Result[3][0] = -dot(vRt, eye);
87     Result[3][1] = -dot(vUp, eye);
88     Result[3][2] = dot(vAt, eye);
89     return Result;
90 }
91 
92 }  // namespace
93 
RenderTopView(std::shared_ptr<IEvsEnumerator> enumerator,const std::vector<ConfigManager::CameraInfo> & camList,const ConfigManager & mConfig)94 RenderTopView::RenderTopView(std::shared_ptr<IEvsEnumerator> enumerator,
95                              const std::vector<ConfigManager::CameraInfo>& camList,
96                              const ConfigManager& mConfig) :
97       mEnumerator(enumerator), mConfig(mConfig) {
98     // Copy the list of cameras we're to employ into our local storage.  We'll create and
99     // associate a streaming video texture when we are activated.
100     mActiveCameras.reserve(camList.size());
101     for (unsigned i = 0; i < camList.size(); i++) {
102         mActiveCameras.emplace_back(camList[i]);
103     }
104 }
105 
// Acquires the GPU and camera resources this renderer needs: a ready GL context,
// both shader programs, the two static PNG textures, and one streaming video
// texture per configured camera.  Returns false on the first failure; anything
// acquired before the failure is simply left in place.
bool RenderTopView::activate() {
    // Ensure GL is ready to go...
    if (!prepareGL()) {
        LOG(ERROR) << "Error initializing GL";
        return false;
    }

    // Load our shader programs
    mPgmAssets.simpleTexture =
            buildShaderProgram(vtxShader_simpleTexture, pixShader_simpleTexture, "simpleTexture");
    if (!mPgmAssets.simpleTexture) {
        LOG(ERROR) << "Failed to build shader program";
        return false;
    }
    mPgmAssets.projectedTexture =
            buildShaderProgram(vtxShader_projectedTexture, pixShader_projectedTexture,
                               "projectedTexture");
    if (!mPgmAssets.projectedTexture) {
        LOG(ERROR) << "Failed to build shader program";
        return false;
    }

    // Load the checkerboard texture image (used as a stand-in wherever a camera
    // has no live video texture -- see renderCameraOntoGroundPlane)
    mTexAssets.checkerBoard.reset(
            createTextureFromPng("/system/etc/automotive/evs/LabeledChecker.png"));
    if (!mTexAssets.checkerBoard) {
        LOG(ERROR) << "Failed to load checkerboard texture";
        return false;
    }

    // Load the car image
    mTexAssets.carTopView.reset(createTextureFromPng("/system/etc/automotive/evs/CarFromTop.png"));
    if (!mTexAssets.carTopView) {
        LOG(ERROR) << "Failed to load carTopView texture";
        return false;
    }

    // Set up streaming video textures for our associated cameras
    for (auto&& cam : mActiveCameras) {
        cam.tex.reset(
                createVideoTexture(mEnumerator, cam.info.cameraId.c_str(), nullptr, sDisplay));
        if (!cam.tex) {
            LOG(ERROR) << "Failed to set up video texture for " << cam.info.cameraId << " ("
                       << cam.info.function << ")";
            return false;
        }
    }

    return true;
}
156 
deactivate()157 void RenderTopView::deactivate() {
158     // Release our video textures
159     // We can't hold onto it because some other Render object might need the same camera
160     for (auto&& cam : mActiveCameras) {
161         cam.tex = nullptr;
162     }
163 }
164 
drawFrame(const BufferDesc & tgtBuffer)165 bool RenderTopView::drawFrame(const BufferDesc& tgtBuffer) {
166     // Tell GL to render to the given buffer
167     if (!attachRenderTarget(tgtBuffer)) {
168         LOG(ERROR) << "Failed to attached render target";
169         return false;
170     }
171 
172     // Set up our top down projection matrix from car space (world units, Xfwd, Yright, Zup)
173     // to view space (-1 to 1)
174     const float top = mConfig.getDisplayTopLocation();
175     const float bottom = mConfig.getDisplayBottomLocation();
176     const float right = mConfig.getDisplayRightLocation(sAspectRatio);
177     const float left = mConfig.getDisplayLeftLocation(sAspectRatio);
178 
179     const float near = 10.0f;  // arbitrary top of view volume
180     const float far = 0.0f;    // ground plane is at zero
181 
182     // We can use a simple, unrotated ortho view since the screen and car space axis are
183     // naturally aligned in the top down view.
184     orthoMatrix = android::mat4::ortho(left, right, top, bottom, near, far);
185 
186     // Refresh our video texture contents.  We do it all at once in hopes of getting
187     // better coherence among images.  This does not guarantee synchronization, of course...
188     for (auto&& cam : mActiveCameras) {
189         if (cam.tex) {
190             cam.tex->refresh();
191         }
192     }
193 
194     // Iterate over all the cameras and project their images onto the ground plane
195     for (auto&& cam : mActiveCameras) {
196         renderCameraOntoGroundPlane(cam);
197     }
198 
199     // Draw the car image
200     renderCarTopView();
201 
202     // Now that everythign is submitted, release our hold on the texture resource
203     detachRenderTarget();
204 
205     // Wait for the rendering to finish
206     glFinish();
207     detachRenderTarget();
208     return true;
209 }
210 
211 //
212 // Responsible for drawing the car's self image in the top down view.
213 // Draws in car model space (units of meters with origin at center of rear axle)
214 // NOTE:  We probably want to eventually switch to using a VertexArray based model system.
215 //
// Draws the car's self image, alpha-blended over the already-rendered ground
// plane, as a single textured quad in car model space.
void RenderTopView::renderCarTopView() {
    // Compute the corners of our image footprint in car space.  The config tells us
    // which pixel rows of the image correspond to the front and rear of the car, which
    // together with the known car length gives us a car-space-units-per-texel scale.
    const float carLengthInTexels = mConfig.carGraphicRearPixel() - mConfig.carGraphicFrontPixel();
    const float carSpaceUnitsPerTexel = mConfig.getCarLength() / carLengthInTexels;
    const float textureHeightInCarSpace = mTexAssets.carTopView->height() * carSpaceUnitsPerTexel;
    const float textureAspectRatio =
            (float)mTexAssets.carTopView->width() / mTexAssets.carTopView->height();
    // Portion of the image below (behind) the rear of the car, used to anchor the quad
    // relative to the configured rear location.
    const float pixelsBehindCarInImage =
            mTexAssets.carTopView->height() - mConfig.carGraphicRearPixel();
    const float textureExtentBehindCarInCarSpace = pixelsBehindCarInImage * carSpaceUnitsPerTexel;

    // Quad edges in car space (bt=bottom, tp=top, lt=left, rt=right)
    const float btCS = mConfig.getRearLocation() - textureExtentBehindCarInCarSpace;
    const float tpCS = textureHeightInCarSpace + btCS;
    const float ltCS = 0.5f * textureHeightInCarSpace * textureAspectRatio;
    const float rtCS = -ltCS;

    GLfloat vertsCarPos[] = {
            ltCS, tpCS, 0.0f,  // left top in car space
            rtCS, tpCS, 0.0f,  // right top
            ltCS, btCS, 0.0f,  // left bottom
            rtCS, btCS, 0.0f   // right bottom
    };
    // NOTE:  We didn't flip the image in the texture, so V=0 is actually the top of the image
    GLfloat vertsCarTex[] = {
            0.0f, 0.0f,  // left top
            1.0f, 0.0f,  // right top
            0.0f, 1.0f,  // left bottom
            1.0f, 1.0f   // right bottom
    };
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsCarPos);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, vertsCarTex);
    glEnableVertexAttribArray(0);
    glEnableVertexAttribArray(1);

    // Standard alpha blend so transparent parts of the car image leave the ground visible
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    glUseProgram(mPgmAssets.simpleTexture);
    GLint loc = glGetUniformLocation(mPgmAssets.simpleTexture, "cameraMat");
    glUniformMatrix4fv(loc, 1, false, orthoMatrix.asArray());
    glBindTexture(GL_TEXTURE_2D, mTexAssets.carTopView->glId());

    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

    // Restore GL state so later draws aren't affected
    glDisable(GL_BLEND);

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
}
265 
266 // NOTE:  Might be worth reviewing the ideas at
267 // http://math.stackexchange.com/questions/1691895/inverse-of-perspective-matrix
268 // to see if that simplifies the math, although we'll still want to compute the actual ground
269 // interception points taking into account the pitchLimit as below.
renderCameraOntoGroundPlane(const ActiveCamera & cam)270 void RenderTopView::renderCameraOntoGroundPlane(const ActiveCamera& cam) {
271     // How far is the farthest any camera should even consider projecting it's image?
272     const float visibleSizeV = mConfig.getDisplayTopLocation() - mConfig.getDisplayBottomLocation();
273     const float visibleSizeH = visibleSizeV * sAspectRatio;
274     const float maxRange = (visibleSizeH > visibleSizeV) ? visibleSizeH : visibleSizeV;
275 
276     // Construct the projection matrix (View + Projection) associated with this sensor
277     const android::mat4 V = cameraLookMatrix(cam.info);
278     const android::mat4 P =
279             perspective(cam.info.hfov, cam.info.vfov, cam.info.position[Z], maxRange);
280     const android::mat4 projectionMatix = P * V;
281 
282     // Just draw the whole darn ground plane for now -- we're wasting fill rate, but so what?
283     // A 2x optimization would be to draw only the 1/2 space of the window in the direction
284     // the sensor is facing.  A more complex solution would be to construct the intersection
285     // of the sensor volume with the ground plane and render only that geometry.
286     const float top = mConfig.getDisplayTopLocation();
287     const float bottom = mConfig.getDisplayBottomLocation();
288     const float wsHeight = top - bottom;
289     const float wsWidth = wsHeight * sAspectRatio;
290     const float right = wsWidth * 0.5f;
291     const float left = -right;
292 
293     const android::vec3 topLeft(left, top, 0.0f);
294     const android::vec3 topRight(right, top, 0.0f);
295     const android::vec3 botLeft(left, bottom, 0.0f);
296     const android::vec3 botRight(right, bottom, 0.0f);
297 
298     GLfloat vertsPos[] = {
299             topLeft[X], topLeft[Y], topLeft[Z], topRight[X], topRight[Y], topRight[Z],
300             botLeft[X], botLeft[Y], botLeft[Z], botRight[X], botRight[Y], botRight[Z],
301     };
302     glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, vertsPos);
303     glEnableVertexAttribArray(0);
304 
305     glDisable(GL_BLEND);
306 
307     glUseProgram(mPgmAssets.projectedTexture);
308     GLint locCam = glGetUniformLocation(mPgmAssets.projectedTexture, "cameraMat");
309     glUniformMatrix4fv(locCam, 1, false, orthoMatrix.asArray());
310     GLint locProj = glGetUniformLocation(mPgmAssets.projectedTexture, "projectionMat");
311     glUniformMatrix4fv(locProj, 1, false, projectionMatix.asArray());
312 
313     GLuint texId;
314     if (cam.tex) {
315         texId = cam.tex->glId();
316     } else {
317         texId = mTexAssets.checkerBoard->glId();
318     }
319     glBindTexture(GL_TEXTURE_2D, texId);
320 
321     glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
322 
323     glDisableVertexAttribArray(0);
324 }
325