• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2012 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 //#define LOG_NNDEBUG 0
19 #define LOG_TAG "EmulatedCamera2_Sensor"
20 
21 #ifdef LOG_NNDEBUG
22 #define ALOGVV(...) ALOGV(__VA_ARGS__)
23 #else
24 #define ALOGVV(...) ((void)0)
25 #endif
26 
#include <utils/Log.h>

#include <cmath>
#include <cstdlib>
#include <cstring>

#include "../EmulatedFakeCamera2.h"
#include "Sensor.h"
#include "system/camera_metadata.h"
34 
35 namespace android {
36 
// Fixed sensor array size: {width, height} in pixels.
const unsigned int Sensor::kResolution[2]  = {640, 480};

// Supported exposure time range, in nanoseconds: {min, max}.
const nsecs_t Sensor::kExposureTimeRange[2] =
    {1000L, 30000000000L} ; // 1 us - 30 sec
// Supported total frame duration range, in nanoseconds: {min, max}.
const nsecs_t Sensor::kFrameDurationRange[2] =
    {33331760L, 30000000000L}; // ~1/30 s - 30 sec
// Smallest vertical blanking interval appended after each frame's readout.
const nsecs_t Sensor::kMinVerticalBlank = 10000L;

// Bayer color filter arrangement reported for the raw output.
const uint8_t Sensor::kColorFilterArrangement = ANDROID_SENSOR_RGGB;

// Output image data characteristics
const uint32_t Sensor::kMaxRawValue = 4000; // full-scale A/D count
const uint32_t Sensor::kBlackLevel  = 1000; // offset added to every raw sample

// Sensor sensitivity
const float Sensor::kSaturationVoltage      = 0.520f;
const uint32_t Sensor::kSaturationElectrons = 2000;
const float Sensor::kVoltsPerLuxSecond      = 0.100f;

// Derived conversion from scene illumination (lux-seconds) to electrons.
const float Sensor::kElectronsPerLuxSecond =
        Sensor::kSaturationElectrons / Sensor::kSaturationVoltage
        * Sensor::kVoltsPerLuxSecond;

// Digital counts produced per electron at unity (ISO 100) gain.
const float Sensor::kBaseGainFactor = (float)Sensor::kMaxRawValue /
            Sensor::kSaturationElectrons;

const float Sensor::kReadNoiseStddevBeforeGain = 1.177; // in electrons
const float Sensor::kReadNoiseStddevAfterGain =  2.100; // in digital counts
const float Sensor::kReadNoiseVarBeforeGain =
            Sensor::kReadNoiseStddevBeforeGain *
            Sensor::kReadNoiseStddevBeforeGain;
const float Sensor::kReadNoiseVarAfterGain =
            Sensor::kReadNoiseStddevAfterGain *
            Sensor::kReadNoiseStddevAfterGain;

// While each row has to read out, reset, and then expose, the (reset +
// expose) sequence can be overlapped by other row readouts, so the final
// minimum frame duration is purely a function of row readout time, at least
// if there's a reasonable number of rows.
const nsecs_t Sensor::kRowReadoutTime =
            Sensor::kFrameDurationRange[0] / Sensor::kResolution[1];

// ISO sensitivities selectable via setSensitivity().
const uint32_t Sensor::kAvailableSensitivities[5] =
    {100, 200, 400, 800, 1600};
const uint32_t Sensor::kDefaultSensitivity = 100;
82 
83 /** A few utility functions for math, normal distributions */
84 
85 // Take advantage of IEEE floating-point format to calculate an approximate
86 // square root. Accurate to within +-3.6%
/**
 * Fast approximate square root using the IEEE-754 bit representation.
 * Accurate to within +-3.6% of the true square root for normal positive
 * inputs.  Used to avoid a full sqrtf() per pixel in the noise model.
 */
float sqrtf_approx(float r) {
    // Modifier is based on IEEE floating-point representation; the
    // manipulations boil down to finding approximate log2, dividing by two, and
    // then inverting the log2. A bias is added to make the relative error
    // symmetric about the real answer.
    const int32_t modifier = 0x1FBB4000;

    // Use memcpy for the bit-level reinterpretation: casting a float* to an
    // int32_t* (as the original did) violates strict aliasing and is
    // undefined behavior.  memcpy is the well-defined equivalent and
    // optimizes to the same register move.
    int32_t r_i;
    memcpy(&r_i, &r, sizeof(r_i));

    r_i = (r_i >> 1) + modifier;

    float result;
    memcpy(&result, &r_i, sizeof(result));
    return result;
}
99 
100 
101 
// Constructs the simulated sensor with default controls; the capture thread
// is not started until startUp() is called.
Sensor::Sensor(EmulatedFakeCamera2 *parent):
        Thread(false),
        mParent(parent),
        mGotVSync(false),
        // Default exposure: the full minimum frame period minus blanking.
        mExposureTime(kFrameDurationRange[0]-kMinVerticalBlank),
        mFrameDuration(kFrameDurationRange[0]),
        mGainFactor(kDefaultSensitivity),
        mNextBuffers(NULL),
        mCapturedBuffers(NULL),
        // Scene is sized to the sensor array and its sensitivity model.
        mScene(kResolution[0], kResolution[1], kElectronsPerLuxSecond)
{

}
115 
// Destructor stops the capture thread (if running) before teardown.
Sensor::~Sensor() {
    shutDown();
}
119 
startUp()120 status_t Sensor::startUp() {
121     ALOGV("%s: E", __FUNCTION__);
122 
123     int res;
124     mCapturedBuffers = NULL;
125     res = run("EmulatedFakeCamera2::Sensor",
126             ANDROID_PRIORITY_URGENT_DISPLAY);
127 
128     if (res != OK) {
129         ALOGE("Unable to start up sensor capture thread: %d", res);
130     }
131     return res;
132 }
133 
shutDown()134 status_t Sensor::shutDown() {
135     ALOGV("%s: E", __FUNCTION__);
136 
137     int res;
138     res = requestExitAndWait();
139     if (res != OK) {
140         ALOGE("Unable to shut down sensor capture thread: %d", res);
141     }
142     return res;
143 }
144 
// Exposes the simulated scene so callers can adjust its content/lighting.
Scene &Sensor::getScene() {
    return mScene;
}
148 
setExposureTime(uint64_t ns)149 void Sensor::setExposureTime(uint64_t ns) {
150     Mutex::Autolock lock(mControlMutex);
151     ALOGVV("Exposure set to %f", ns/1000000.f);
152     mExposureTime = ns;
153 }
154 
setFrameDuration(uint64_t ns)155 void Sensor::setFrameDuration(uint64_t ns) {
156     Mutex::Autolock lock(mControlMutex);
157     ALOGVV("Frame duration set to %f", ns/1000000.f);
158     mFrameDuration = ns;
159 }
160 
setSensitivity(uint32_t gain)161 void Sensor::setSensitivity(uint32_t gain) {
162     Mutex::Autolock lock(mControlMutex);
163     ALOGVV("Gain set to %d", gain);
164     mGainFactor = gain;
165 }
166 
// Queues the buffer set that the next capture should fill; the capture
// thread consumes (and clears) this pointer once per frame.
void Sensor::setDestinationBuffers(Buffers *buffers) {
    Mutex::Autolock lock(mControlMutex);
    mNextBuffers = buffers;
}
171 
waitForVSync(nsecs_t reltime)172 bool Sensor::waitForVSync(nsecs_t reltime) {
173     int res;
174     Mutex::Autolock lock(mControlMutex);
175 
176     mGotVSync = false;
177     res = mVSync.waitRelative(mControlMutex, reltime);
178     if (res != OK && res != TIMED_OUT) {
179         ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
180         return false;
181     }
182     return mGotVSync;
183 }
184 
waitForNewFrame(nsecs_t reltime,nsecs_t * captureTime)185 bool Sensor::waitForNewFrame(nsecs_t reltime,
186         nsecs_t *captureTime) {
187     Mutex::Autolock lock(mReadoutMutex);
188     uint8_t *ret;
189     if (mCapturedBuffers == NULL) {
190         int res;
191         res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
192         if (res == TIMED_OUT) {
193             return false;
194         } else if (res != OK || mCapturedBuffers == NULL) {
195             ALOGE("Error waiting for sensor readout signal: %d", res);
196             return false;
197         }
198     } else {
199         mReadoutComplete.signal();
200     }
201 
202     *captureTime = mCaptureTime;
203     mCapturedBuffers = NULL;
204     return true;
205 }
206 
// Thread hook invoked once before the first threadLoop() iteration;
// resets the per-run capture state.
status_t Sensor::readyToRun() {
    ALOGV("Starting up sensor thread");
    mStartupTime = systemTime();
    mNextCaptureTime = 0;
    mNextCapturedBuffers = NULL;
    return OK;
}
214 
bool Sensor::threadLoop() {
    /**
     * Sensor capture operation main loop.
     *
     * Stages are out-of-order relative to a single frame's processing, but
     * in-order in time.
     */

    /**
     * Stage 1: Read in latest control parameters
     */
    uint64_t exposureDuration;
    uint64_t frameDuration;
    uint32_t gain;
    Buffers *nextBuffers;
    {
        Mutex::Autolock lock(mControlMutex);
        // Snapshot the controls so the rest of this frame uses a consistent
        // set even if the setters run concurrently.
        exposureDuration = mExposureTime;
        frameDuration    = mFrameDuration;
        gain             = mGainFactor;
        nextBuffers      = mNextBuffers;
        // Don't reuse a buffer set
        mNextBuffers = NULL;

        // Signal VSync for start of readout
        ALOGVV("Sensor VSync");
        mGotVSync = true;
        mVSync.signal();
    }

    /**
     * Stage 3: Read out latest captured image
     */

    Buffers *capturedBuffers = NULL;
    nsecs_t captureTime = 0;

    nsecs_t startRealTime  = systemTime();
    // Stagefright cares about system time for timestamps, so base simulated
    // time on that.
    nsecs_t simulatedTime    = startRealTime;
    nsecs_t frameEndRealTime = startRealTime + frameDuration;
    // NOTE(review): frameReadoutEndRealTime is computed but never read in
    // this loop — confirm whether it was meant to gate the readout signal.
    nsecs_t frameReadoutEndRealTime = startRealTime +
            kRowReadoutTime * kResolution[1];

    if (mNextCapturedBuffers != NULL) {
        ALOGVV("Sensor starting readout");
        // Pretend we're doing readout now; will signal once enough time has elapsed
        capturedBuffers = mNextCapturedBuffers;
        captureTime    = mNextCaptureTime;
    }
    simulatedTime += kRowReadoutTime + kMinVerticalBlank;

    // TODO: Move this signal to another thread to simulate readout
    // time properly
    if (capturedBuffers != NULL) {
        ALOGVV("Sensor readout complete");
        Mutex::Autolock lock(mReadoutMutex);
        if (mCapturedBuffers != NULL) {
            // The consumer hasn't picked up the previous frame yet; block
            // until waitForNewFrame() signals it is done.
            ALOGV("Waiting for readout thread to catch up!");
            mReadoutComplete.wait(mReadoutMutex);
        }

        // Publish the finished frame for waitForNewFrame().
        mCapturedBuffers = capturedBuffers;
        mCaptureTime = captureTime;
        mReadoutAvailable.signal();
        capturedBuffers = NULL;
    }

    /**
     * Stage 2: Capture new image
     */

    mNextCaptureTime = simulatedTime;
    mNextCapturedBuffers = nextBuffers;

    if (mNextCapturedBuffers != NULL) {
        ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
                (float)exposureDuration/1e6, gain);
        mScene.setExposureDuration((float)exposureDuration/1e9);
        mScene.calculateScene(mNextCaptureTime);

        // Might be adding more buffers, so size isn't constant
        for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
            const StreamBuffer &b = (*mNextCapturedBuffers)[i];
            ALOGVV("Sensor capturing buffer %d: stream %d,"
                    " %d x %d, format %x, stride %d, buf %p, img %p",
                    i, b.streamId, b.width, b.height, b.format, b.stride,
                    b.buffer, b.img);
            // Render the scene into each output buffer in its own format.
            switch(b.format) {
                case HAL_PIXEL_FORMAT_RAW_SENSOR:
                    captureRaw(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGB_888:
                    captureRGB(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_RGBA_8888:
                    captureRGBA(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_BLOB:
                    // Add auxillary buffer of the right size
                    // Assumes only one BLOB (JPEG) buffer in
                    // mNextCapturedBuffers
                    StreamBuffer bAux;
                    bAux.streamId = 0;
                    bAux.width = b.width;
                    bAux.height = b.height;
                    bAux.format = HAL_PIXEL_FORMAT_RGB_888;
                    bAux.stride = b.width;
                    bAux.buffer = NULL;
                    // TODO: Reuse these
                    bAux.img = new uint8_t[b.width * b.height * 3];
                    mNextCapturedBuffers->push_back(bAux);
                    break;
                case HAL_PIXEL_FORMAT_YCrCb_420_SP:
                    captureNV21(b.img, gain, b.stride);
                    break;
                case HAL_PIXEL_FORMAT_YV12:
                    // TODO:
                    ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
                    break;
                default:
                    ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
                            b.format);
                    break;
            }
        }
    }

    ALOGVV("Sensor vertical blanking interval");
    // Sleep out the remainder of the frame period so each loop iteration
    // takes (approximately) frameDuration of wall-clock time.
    nsecs_t workDoneRealTime = systemTime();
    const nsecs_t timeAccuracy = 2e6; // 2 ms of imprecision is ok
    if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
        timespec t;
        t.tv_sec = (frameEndRealTime - workDoneRealTime)  / 1000000000L;
        t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;

        int ret;
        do {
            // Retry if the sleep is interrupted by a signal; nanosleep
            // updates t with the remaining time.
            ret = nanosleep(&t, &t);
        } while (ret != 0);
    }
    nsecs_t endRealTime = systemTime();
    ALOGVV("Frame cycle took %d ms, target %d ms",
            (int)((endRealTime - startRealTime)/1000000),
            (int)(frameDuration / 1000000));
    return true;
};
363 
// Renders the scene into a 16-bit Bayer (RGGB) raw buffer, modeling analog
// gain, pixel/ADC saturation, black level offset, and read + photon noise.
// gain is in ISO units (100 = unity); stride is presumably in pixels per
// output row — TODO confirm against callers.
void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
    // Convert ISO gain to digital counts per electron.
    float totalGain = gain/100.0 * kBaseGainFactor;
    float noiseVarGain =  totalGain * totalGain;
    // Total read-noise variance referred to the digital output.
    float readNoiseVar = kReadNoiseVarBeforeGain * noiseVarGain
            + kReadNoiseVarAfterGain;

    int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B}; // RGGB
    mScene.setReadoutPixel(0,0);
    for (unsigned int y = 0; y < kResolution[1]; y++ ) {
        // Even rows sample R/Gr channels, odd rows Gb/B.
        int *bayerRow = bayerSelect + (y & 0x1) * 2;
        uint16_t *px = (uint16_t*)img + y * stride;
        for (unsigned int x = 0; x < kResolution[0]; x++) {
            uint32_t electronCount;
            electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];

            // TODO: Better pixel saturation curve?
            electronCount = (electronCount < kSaturationElectrons) ?
                    electronCount : kSaturationElectrons;

            // TODO: Better A/D saturation curve?
            uint16_t rawCount = electronCount * totalGain;
            rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;

            // Calculate noise value
            // TODO: Use more-correct Gaussian instead of uniform noise
            float photonNoiseVar = electronCount * noiseVarGain;
            float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
            // Scaled to roughly match gaussian/uniform noise stddev
            float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;

            rawCount += kBlackLevel;
            rawCount += noiseStddev * noiseSample;

            *px++ = rawCount;
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("Raw sensor image captured");
}
404 
// Renders the scene into an 8-bit RGBA buffer (alpha fixed at 255).
// gain is in ISO units (100 = unity); stride is the output width in pixels.
// Output is downsampled from the sensor array by inc = sensorWidth/stride
// (assumes stride evenly divides the sensor width — TODO confirm callers).
void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y+=inc, outY++ ) {
        uint8_t *px = img + outY * stride * 4;
        mScene.setReadoutPixel(0, y);
        for (unsigned int x = 0; x < kResolution[0]; x+=inc) {
            uint32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R]  * scale64x;
            gCount = pixel[Scene::Gr] * scale64x;
            bCount = pixel[Scene::B]  * scale64x;

            // Clamp the 1/64-scaled fixed-point values to 8 bits.
            *px++ = rCount < 255*64 ? rCount / 64 : 255;
            *px++ = gCount < 255*64 ? gCount / 64 : 255;
            *px++ = bCount < 255*64 ? bCount / 64 : 255;
            *px++ = 255;
            // Advance the scene's stateful readout past skipped pixels.
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("RGBA sensor image captured");
}
434 
// Renders the scene into a packed 8-bit RGB (3 bytes/pixel) buffer.
// gain is in ISO units (100 = unity); stride is the output width in pixels.
// Output is downsampled from the sensor array by inc = sensorWidth/stride
// (assumes stride evenly divides the sensor width — TODO confirm callers).
void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
    float totalGain = gain/100.0 * kBaseGainFactor;
    // In fixed-point math, calculate total scaling from electrons to 8bpp
    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
    uint32_t inc = kResolution[0] / stride;

    for (unsigned int y = 0, outY = 0; y < kResolution[1]; y += inc, outY++ ) {
        mScene.setReadoutPixel(0, y);
        uint8_t *px = img + outY * stride * 3;
        for (unsigned int x = 0; x < kResolution[0]; x += inc) {
            uint32_t rCount, gCount, bCount;
            // TODO: Perfect demosaicing is a cheat
            const uint32_t *pixel = mScene.getPixelElectrons();
            rCount = pixel[Scene::R]  * scale64x;
            gCount = pixel[Scene::Gr] * scale64x;
            bCount = pixel[Scene::B]  * scale64x;

            // Clamp the 1/64-scaled fixed-point values to 8 bits.
            *px++ = rCount < 255*64 ? rCount / 64 : 255;
            *px++ = gCount < 255*64 ? gCount / 64 : 255;
            *px++ = bCount < 255*64 ? bCount / 64 : 255;
            // Advance the scene's stateful readout past skipped pixels.
            for (unsigned int j = 1; j < inc; j++)
                mScene.getPixelElectrons();
        }
        // TODO: Handle this better
        //simulatedTime += kRowReadoutTime;
    }
    ALOGVV("RGB sensor image captured");
}
463 
captureNV21(uint8_t * img,uint32_t gain,uint32_t stride)464 void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
465     float totalGain = gain/100.0 * kBaseGainFactor;
466     // In fixed-point math, calculate total scaling from electrons to 8bpp
467     int scale64x = 64 * totalGain * 255 / kMaxRawValue;
468 
469     // TODO: Make full-color
470     uint32_t inc = kResolution[0] / stride;
471     uint32_t outH = kResolution[1] / inc;
472     for (unsigned int y = 0, outY = 0, outUV = outH;
473          y < kResolution[1]; y+=inc, outY++, outUV ) {
474         uint8_t *pxY = img + outY * stride;
475         mScene.setReadoutPixel(0,y);
476         for (unsigned int x = 0; x < kResolution[0]; x+=inc) {
477             uint32_t rCount, gCount, bCount;
478             // TODO: Perfect demosaicing is a cheat
479             const uint32_t *pixel = mScene.getPixelElectrons();
480             rCount = pixel[Scene::R]  * scale64x;
481             gCount = pixel[Scene::Gr] * scale64x;
482             bCount = pixel[Scene::B]  * scale64x;
483             uint32_t avg = (rCount + gCount + bCount) / 3;
484             *pxY++ = avg < 255*64 ? avg / 64 : 255;
485             for (unsigned int j = 1; j < inc; j++)
486                 mScene.getPixelElectrons();
487         }
488     }
489     for (unsigned int y = 0, outY = outH; y < kResolution[1]/2; y+=inc, outY++) {
490         uint8_t *px = img + outY * stride;
491         for (unsigned int x = 0; x < kResolution[0]; x+=inc) {
492             // UV to neutral
493             *px++ = 128;
494             *px++ = 128;
495         }
496     }
497     ALOGVV("NV21 sensor image captured");
498 }
499 
500 } // namespace android
501