/*
*
* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2011-2018 ARM or its affiliates
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*/

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/spinlock_types.h>
#include <linux/wait.h>
#include <linux/version.h>
#include "sbuf.h"
#include "acamera.h"
#include "sbuf_fsm.h"
#include "acamera_firmware_settings.h"


#ifdef LOG_MODULE
#undef LOG_MODULE
#define LOG_MODULE LOG_MODULE_SBUF
#endif

static const char *sbuf_status_str[] = {
    "DATA_EMPTY",
    "DATA_PREPARE",
    "DATA_DONE",
    "DATA_USING",
    "ERROR"};

static const char *sbuf_type_str[] = {
    "AE",
    "AWB",
    "AF",
    "GAMMA",
    "IRIDIX",
    "ERROR"};

/**
 * sbuf_item_arr_info - structure describing the current status of an sbuf array
 *
 * @item_total_count: number of sbufs for each buffer type.
 * @item_status_count: number of sbufs in each buffer status; the sum of this
 *          array should equal item_total_count after each operation.
 * @write_idx: array index of the next sbuf to write; its status should be DATA_EMPTY.
 * @read_idx: array index of the next sbuf to read; its status should be DATA_DONE.
 *
 */
struct sbuf_item_arr_info {
    uint32_t item_total_count;
    uint32_t item_status_count[SBUF_STATUS_MAX];
    uint32_t write_idx;
    uint32_t read_idx;
};
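
/*
 * Illustrative example of the invariant above (assuming SBUF_STATS_ARRAY_SIZE
 * is 4): right after init, item_status_count is {4, 0, 0, 0}, i.e. all items
 * DATA_EMPTY. With one buffer being filled (PREPARE) and one already filled
 * (DONE) but none consumed yet, the counts become {2, 1, 1, 0}. In every
 * state the four counts still sum to item_total_count == 4.
 */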

struct sbuf_mgr {
    spinlock_t sbuf_lock;
    int sbuf_inited;

    uint32_t len_allocated;
    uint32_t len_used;
    void *buf_allocated;
    void *buf_used;

    uint32_t cur_wdr_mode;

    struct fw_sbuf *sbuf_base;

#if defined( ISP_HAS_AE_MANUAL_FSM )
    /* AE: array to describe sbuf item status */
    struct sbuf_item ae_sbuf_arr[SBUF_STATS_ARRAY_SIZE];
    /* AE: structure to describe array */
    struct sbuf_item_arr_info ae_arr_info;
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
    /* AWB: array to describe sbuf item status */
    struct sbuf_item awb_sbuf_arr[SBUF_STATS_ARRAY_SIZE];
    /* AWB: structure to describe array */
    struct sbuf_item_arr_info awb_arr_info;
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )
    /* AF: array to describe sbuf item status */
    struct sbuf_item af_sbuf_arr[SBUF_STATS_ARRAY_SIZE];
    /* AF: structure to describe array */
    struct sbuf_item_arr_info af_arr_info;
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
    /* Gamma: array to describe sbuf item status */
    struct sbuf_item gamma_sbuf_arr[SBUF_STATS_ARRAY_SIZE];
    struct sbuf_item_arr_info gamma_arr_info;
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    /* Iridix: array to describe sbuf item status */
    struct sbuf_item iridix_sbuf_arr[SBUF_STATS_ARRAY_SIZE];
    struct sbuf_item_arr_info iridix_arr_info;
#endif
};
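
/*
 * Ownership model (a summary of the code below, not a normative spec):
 * each per-type array is a small ring of shared buffers whose items cycle
 * through EMPTY -> PREPARE -> DONE -> USING -> EMPTY. The kernel FSM gets an
 * EMPTY item, fills it with stats and marks it DONE; the user-space firmware
 * (UF) gets a DONE item (USING while it reads) and returns it EMPTY with new
 * parameters applied. At most one item per type may be in PREPARE and at most
 * one in USING at any time; sbuf_lock protects all index and count updates.
 */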

struct sbuf_context {
    struct sbuf_mgr sbuf_mgr;

    struct mutex fops_lock;
    struct miscdevice sbuf_dev;
    int dev_minor_id;
    char dev_name[SBUF_DEV_NAME_LEN];
    int dev_opened;

    int fw_id;
    sbuf_fsm_t *p_fsm;

    struct mutex idx_set_lock;
    struct sbuf_idx_set idx_set;
    wait_queue_head_t idx_set_wait_queue;
};

static struct sbuf_context sbuf_contexts[FIRMWARE_CONTEXT_NUMBER];

static int is_sbuf_inited( struct sbuf_mgr *p_sbuf_mgr )
{
    int tmp_inited;
    unsigned long irq_flags;

    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );
    tmp_inited = p_sbuf_mgr->sbuf_inited;
    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );

    return tmp_inited;
}

static int sbuf_mgr_alloc_sbuf( struct sbuf_mgr *p_sbuf_mgr )
{
    int i;

    if ( is_sbuf_inited( p_sbuf_mgr ) ) {
        LOG( LOG_ERR, "Error: sbuf alloc should not be called twice." );
        return -1;
    }

    /* round up to a whole number of pages */
    p_sbuf_mgr->len_used = ( sizeof( struct fw_sbuf ) + 1 + PAGE_SIZE ) & PAGE_MASK;

    /* allocate almost one extra page so the used buffer can be page aligned for user-space mapping */
    p_sbuf_mgr->len_allocated = p_sbuf_mgr->len_used - 1 + PAGE_SIZE;

    p_sbuf_mgr->buf_allocated = kzalloc( p_sbuf_mgr->len_allocated, GFP_KERNEL );
    if ( !p_sbuf_mgr->buf_allocated ) {
        LOG( LOG_CRIT, "alloc memory failed." );
        return -ENOMEM;
    }

    /* make the used buffer page aligned */
    p_sbuf_mgr->buf_used = (void *)( ( (unsigned long)p_sbuf_mgr->buf_allocated + PAGE_SIZE - 1 ) & PAGE_MASK );

    LOG( LOG_CRIT, "sbuf: len_needed: %zu, len_alloc: %u, len_used: %u, page_size: %lu, buf_alloc: %lu, buf_used: %lu.",
         sizeof( struct fw_sbuf ), p_sbuf_mgr->len_allocated, p_sbuf_mgr->len_used, PAGE_SIZE, (unsigned long)p_sbuf_mgr->buf_allocated, (unsigned long)p_sbuf_mgr->buf_used );

    /* mark the pages as reserved so that they won't be swapped out */
    for ( i = 0; i < p_sbuf_mgr->len_used; i += PAGE_SIZE ) {
        SetPageReserved( virt_to_page( p_sbuf_mgr->buf_used + i ) );
    }

    return 0;
}
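
/*
 * Worked example of the sizing math above (illustrative numbers only):
 * with PAGE_SIZE = 4096 and sizeof( struct fw_sbuf ) = 10000,
 *
 *   len_used      = ( 10000 + 1 + 4096 ) & ~4095 = 12288   (3 whole pages)
 *   len_allocated = 12288 - 1 + 4096              = 16383
 *
 * The extra (PAGE_SIZE - 1) bytes guarantee that a page-aligned buf_used of
 * len_used bytes always fits inside buf_allocated, wherever kzalloc() happens
 * to place the allocation.
 */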

static void sbuf_mgr_init_sbuf( struct sbuf_mgr *p_sbuf_mgr )
{
    int i;
    unsigned long irq_flags;

    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );
    p_sbuf_mgr->sbuf_inited = 1;
    p_sbuf_mgr->sbuf_base = (struct fw_sbuf *)p_sbuf_mgr->buf_used;
    p_sbuf_mgr->cur_wdr_mode = 0xFFFF; // Invalid

#if defined( ISP_HAS_AE_MANUAL_FSM )
    /*** For AE ***/
    for ( i = 0; i < SBUF_STATS_ARRAY_SIZE; i++ ) {
        p_sbuf_mgr->ae_sbuf_arr[i].buf_idx = i;
        p_sbuf_mgr->ae_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
        p_sbuf_mgr->ae_sbuf_arr[i].buf_type = SBUF_TYPE_AE;
        p_sbuf_mgr->ae_sbuf_arr[i].buf_base = (void *)&( p_sbuf_mgr->sbuf_base->ae_sbuf[i] );
    }

    memset( &p_sbuf_mgr->ae_arr_info, 0, sizeof( p_sbuf_mgr->ae_arr_info ) );
    p_sbuf_mgr->ae_arr_info.item_total_count = SBUF_STATS_ARRAY_SIZE;
    p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY] = SBUF_STATS_ARRAY_SIZE;

    /* initialize read_idx to ARRAY_SIZE, which is an invalid index. */
    p_sbuf_mgr->ae_arr_info.write_idx = 0;
    p_sbuf_mgr->ae_arr_info.read_idx = SBUF_STATS_ARRAY_SIZE;
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
    /*** For AWB ***/
    for ( i = 0; i < SBUF_STATS_ARRAY_SIZE; i++ ) {
        p_sbuf_mgr->awb_sbuf_arr[i].buf_idx = i;
        p_sbuf_mgr->awb_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
        p_sbuf_mgr->awb_sbuf_arr[i].buf_type = SBUF_TYPE_AWB;
        p_sbuf_mgr->awb_sbuf_arr[i].buf_base = (void *)&( p_sbuf_mgr->sbuf_base->awb_sbuf[i] );
    }

    memset( &p_sbuf_mgr->awb_arr_info, 0, sizeof( p_sbuf_mgr->awb_arr_info ) );
    p_sbuf_mgr->awb_arr_info.item_total_count = SBUF_STATS_ARRAY_SIZE;
    p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY] = SBUF_STATS_ARRAY_SIZE;

    p_sbuf_mgr->awb_arr_info.write_idx = 0;
    p_sbuf_mgr->awb_arr_info.read_idx = SBUF_STATS_ARRAY_SIZE;
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )
    /*** For AF ***/
    for ( i = 0; i < SBUF_STATS_ARRAY_SIZE; i++ ) {
        p_sbuf_mgr->af_sbuf_arr[i].buf_idx = i;
        p_sbuf_mgr->af_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
        p_sbuf_mgr->af_sbuf_arr[i].buf_type = SBUF_TYPE_AF;
        p_sbuf_mgr->af_sbuf_arr[i].buf_base = (void *)&( p_sbuf_mgr->sbuf_base->af_sbuf[i] );
    }

    memset( &p_sbuf_mgr->af_arr_info, 0, sizeof( p_sbuf_mgr->af_arr_info ) );
    p_sbuf_mgr->af_arr_info.item_total_count = SBUF_STATS_ARRAY_SIZE;
    p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY] = SBUF_STATS_ARRAY_SIZE;

    p_sbuf_mgr->af_arr_info.write_idx = 0;
    p_sbuf_mgr->af_arr_info.read_idx = SBUF_STATS_ARRAY_SIZE;
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
    /*** For Gamma Stats ***/
    for ( i = 0; i < SBUF_STATS_ARRAY_SIZE; i++ ) {
        p_sbuf_mgr->gamma_sbuf_arr[i].buf_idx = i;
        p_sbuf_mgr->gamma_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
        p_sbuf_mgr->gamma_sbuf_arr[i].buf_type = SBUF_TYPE_GAMMA;
        p_sbuf_mgr->gamma_sbuf_arr[i].buf_base = (void *)&( p_sbuf_mgr->sbuf_base->gamma_sbuf[i] );
    }

    memset( &p_sbuf_mgr->gamma_arr_info, 0, sizeof( p_sbuf_mgr->gamma_arr_info ) );
    p_sbuf_mgr->gamma_arr_info.item_total_count = SBUF_STATS_ARRAY_SIZE;
    p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY] = SBUF_STATS_ARRAY_SIZE;

    p_sbuf_mgr->gamma_arr_info.write_idx = 0;
    p_sbuf_mgr->gamma_arr_info.read_idx = SBUF_STATS_ARRAY_SIZE;
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    /*** For Iridix ***/
    for ( i = 0; i < SBUF_STATS_ARRAY_SIZE; i++ ) {
        p_sbuf_mgr->iridix_sbuf_arr[i].buf_idx = i;
        p_sbuf_mgr->iridix_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
        p_sbuf_mgr->iridix_sbuf_arr[i].buf_type = SBUF_TYPE_IRIDIX;
        p_sbuf_mgr->iridix_sbuf_arr[i].buf_base = (void *)&( p_sbuf_mgr->sbuf_base->iridix_sbuf[i] );
    }

    memset( &p_sbuf_mgr->iridix_arr_info, 0, sizeof( p_sbuf_mgr->iridix_arr_info ) );
    p_sbuf_mgr->iridix_arr_info.item_total_count = SBUF_STATS_ARRAY_SIZE;
    p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY] = SBUF_STATS_ARRAY_SIZE;

    p_sbuf_mgr->iridix_arr_info.write_idx = 0;
    p_sbuf_mgr->iridix_arr_info.read_idx = SBUF_STATS_ARRAY_SIZE;
#endif

    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );
}

static int sbuf_mgr_init( struct sbuf_mgr *p_sbuf_mgr )
{
    int rc;

    p_sbuf_mgr->sbuf_inited = 0;
    spin_lock_init( &( p_sbuf_mgr->sbuf_lock ) );

    rc = sbuf_mgr_alloc_sbuf( p_sbuf_mgr );
    if ( rc ) {
        LOG( LOG_ERR, "sbuf_mgr alloc buffer failed, ret: %d.", rc );
        return rc;
    }

    sbuf_mgr_init_sbuf( p_sbuf_mgr );

    return 0;
}

#define _GET_LUT_SIZE( lut ) ( ( lut )->rows * ( lut )->cols * ( lut )->width )

static uint32_t get_cur_calibration_total_size( void *fw_instance )
{
    uint32_t result = 0;
    uint32_t idx = 0;
    LookupTable *p_lut = NULL;

    LOG( LOG_ERR, "fw_instance: %p, CALIBRATION_TOTAL_SIZE: %d.", fw_instance, CALIBRATION_TOTAL_SIZE );

    for ( idx = 0; idx < CALIBRATION_TOTAL_SIZE; idx++ ) {

        p_lut = _GET_LOOKUP_PTR( fw_instance, idx );

        if ( p_lut ) {
            result += _GET_LUT_SIZE( p_lut );
        } else {
            LOG( LOG_ERR, "Error: LUT %d is NULL(%p), not inited.", idx, p_lut );
        }

        result += sizeof( LookupTable );
    }

    LOG( LOG_ERR, "Total size for all IQ LUTs is %d bytes", result );
    return result;
}

/*
  sbuf calibration memory layout: N is CALIBRATION_TOTAL_SIZE

  ----------
  | lut1   |---
  |--------|  |
  | lut2   |  |
  |--------|  |
  | ....   |  |
  |--------|  |
  | lutN   |--|----
  |--------|  |   |
  | data1  |<--   |
  |--------|      |
  | data2  |      |
  |--------|      |
  | ....   |      |
  |--------|      |
  | dataN  |<------
  |--------|
*/
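/*
 * In that layout, the header array and the packed data region are addressed
 * as follows (a sketch of the pointer math used in the function below, with
 * the same names as in this file):
 *
 *   struct sbuf_lookup_table *hdr = (struct sbuf_lookup_table *)sbuf_cali_base;
 *   uint8_t *data                 = sbuf_cali_base
 *                                 + sizeof( struct sbuf_lookup_table ) * N;
 *
 * LUT i's payload starts at data plus the sum of _GET_LUT_SIZE() over LUTs
 * 0..i-1, because the copy loop appends each LUT's bytes back to back.
 */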
static int update_cur_calibration_to_sbuf( void *fw_instance, struct sbuf_mgr *p_sbuf_mgr )
{
    int rc = 0;
    uint32_t idx = 0;
    uint32_t lut_size = 0;
    LookupTable *p_lut = NULL;

    uint8_t *sbuf_cali_base = (uint8_t *)&p_sbuf_mgr->sbuf_base->kf_info.cali_info.cali_data;
    struct sbuf_lookup_table *p_sbuf_lut_arr = (struct sbuf_lookup_table *)sbuf_cali_base;
    uint8_t *p_sbuf_cali_data = sbuf_cali_base + sizeof( struct sbuf_lookup_table ) * CALIBRATION_TOTAL_SIZE;


    LOG( LOG_ERR, "sbuf_cali_base: %p, p_sbuf_lut_arr: %p, p_sbuf_cali_data: %p", sbuf_cali_base, p_sbuf_lut_arr, p_sbuf_cali_data );

    for ( idx = 0; idx < CALIBRATION_TOTAL_SIZE; idx++ ) {
        p_lut = _GET_LOOKUP_PTR( fw_instance, idx );
        if ( !p_lut ) {
            // NOTE: Don't touch ptr values, UF will manage it.
            // p_sbuf_lut_arr[idx].ptr = 0;
            p_sbuf_lut_arr[idx].rows = 0;
            p_sbuf_lut_arr[idx].cols = 0;
            p_sbuf_lut_arr[idx].width = 0;
            continue;
        }

        lut_size = _GET_LUT_SIZE( p_lut );

        if ( !p_lut->ptr ) {
            rc = -1;
            LOG( LOG_CRIT, "IQ LUT %d is NULL", idx );
            break;
        }

        // NOTE: Don't touch ptr values, UF will manage it.
        // p_sbuf_lut_arr[idx].ptr = NULL;
        p_sbuf_lut_arr[idx].rows = p_lut->rows;
        p_sbuf_lut_arr[idx].cols = p_lut->cols;
        p_sbuf_lut_arr[idx].width = p_lut->width;

        memcpy( p_sbuf_cali_data, p_lut->ptr, lut_size );
        p_sbuf_cali_data += lut_size;
    }

    p_sbuf_mgr->sbuf_base->kf_info.cali_info.is_fetched = 0;

    return rc;
}

static uint32_t sbuf_mgr_item_count_in_using( struct sbuf_mgr *p_sbuf_mgr )
{
    uint32_t rc = 0;
    unsigned long irq_flags;

    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );

#if defined( ISP_HAS_AE_MANUAL_FSM )
    rc += p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_USING];
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
    rc += p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_USING];
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )
    rc += p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_USING];
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
    rc += p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_USING];
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    /* match the guard used everywhere else so Iridix items are also counted on IRIDIX8-only builds */
    rc += p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_USING];
#endif

    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );

    LOG( LOG_ERR, "sbuf item using total count: %u.", rc );

    return rc;
}
static uint8_t sbuf_calibration_is_ready_to_update( struct sbuf_context *p_ctx )
{
    uint32_t rc = 1;

    // The sbuf calibration can be updated only when the UF is using no sbuf item
    // and, if the UF device is open, the UF has already fetched the previous calibration.
    if ( sbuf_mgr_item_count_in_using( &p_ctx->sbuf_mgr ) ||
         ( p_ctx->dev_opened && ( p_ctx->sbuf_mgr.sbuf_base->kf_info.cali_info.is_fetched == 0 ) ) ) {
        rc = 0;
    }

    return rc;
}
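
/*
 * Equivalently, the predicate above computes:
 *
 *   ready = ( count_in_using == 0 ) && ( !dev_opened || is_fetched )
 *
 * i.e. a closed device is always safe to update, and an open one is safe only
 * after the UF has consumed the calibration currently in the shared buffer.
 */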

static int sbuf_calibration_init( struct sbuf_context *p_ctx )
{
    int rc = 0;
    uint32_t cnt = 0;
    uint32_t cali_total_size = 0;
    uint32_t wdr_mode = 0;

    LOG( LOG_ERR, "sbuf_calibration_init." );

    acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_WDR_MODE, NULL, 0, &wdr_mode, sizeof( wdr_mode ) );

    if ( wdr_mode == p_ctx->sbuf_mgr.cur_wdr_mode ) {
        LOG( LOG_INFO, "same wdr_mode, already inited, return." );
        return 0;
    }

    p_ctx->p_fsm->is_paused = 1;

    // get the total calibration size, including LUTs
    cali_total_size = get_cur_calibration_total_size( ACAMERA_FSM2CTX_PTR( p_ctx->p_fsm ) );

    if ( cali_total_size > ISP_MAX_CALIBRATION_DATA_SIZE ) {
        LOG( LOG_CRIT, "Error: Not enough memory for calibration data, total_size: %d, max_size: %d.", cali_total_size, ISP_MAX_CALIBRATION_DATA_SIZE );
        p_ctx->p_fsm->is_paused = 0;
        return -1;
    }

    while ( !sbuf_calibration_is_ready_to_update( p_ctx ) ) {
        LOG( LOG_ERR, "wait for UF to finish using" );
        // sleep 3 ms
        system_timer_usleep( 3 * 1000 );

        cnt++;

        // 3 seconds timeout
        if ( cnt >= 1000 ) {
            LOG( LOG_CRIT, "timeout waiting for calibration to become ready to update" );
            p_ctx->p_fsm->is_paused = 0;
            return -1;
        }
    }

    rc = update_cur_calibration_to_sbuf( ACAMERA_FSM2CTX_PTR( p_ctx->p_fsm ), &p_ctx->sbuf_mgr );

    p_ctx->p_fsm->is_paused = 0;

    LOG( LOG_CRIT, "sbuf_calibration wdr_mode updated: %d -> %d.", p_ctx->sbuf_mgr.cur_wdr_mode, wdr_mode );

    p_ctx->sbuf_mgr.cur_wdr_mode = wdr_mode;

    return rc;
}


void sbuf_update_calibration_data( sbuf_fsm_ptr_t p_fsm )
{
    uint32_t fw_id = p_fsm->cmn.ctx_id;
    struct sbuf_context *p_ctx = NULL;

    p_ctx = &( sbuf_contexts[fw_id] );
    if ( p_ctx->fw_id != fw_id ) {
        LOG( LOG_CRIT, "Error: ctx_id mismatch, fsm fw_id: %d, ctx_id: %d.", fw_id, p_ctx->fw_id );
        return;
    }

    sbuf_calibration_init( p_ctx );
}


static int sbuf_ctx_init( struct sbuf_context *p_ctx )
{
    int rc;

    rc = sbuf_mgr_init( &p_ctx->sbuf_mgr );
    if ( rc ) {
        LOG( LOG_ERR, "init failed, error: sbuf_mgr init failed, ret: %d.", rc );

        return rc;
    }

    rc = sbuf_calibration_init( p_ctx );
    if ( rc ) {
        LOG( LOG_ERR, "init failed, error: calibration init failed, ret: %d.", rc );
    }

    return rc;
}

static void sbuf_mgr_reset( struct sbuf_mgr *p_sbuf_mgr )
{
    int i;
    unsigned long irq_flags;

    spin_lock_init( &( p_sbuf_mgr->sbuf_lock ) );

    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );

    p_sbuf_mgr->sbuf_base->kf_info.cali_info.is_fetched = 0;

    for ( i = 0; i < SBUF_STATS_ARRAY_SIZE; i++ ) {
#if defined( ISP_HAS_AE_MANUAL_FSM )
        /*** For AE ***/
        if ( SBUF_STATUS_DATA_USING == p_sbuf_mgr->ae_sbuf_arr[i].buf_status ) {
            p_sbuf_mgr->ae_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
            p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY]++;
            p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_USING]--;
        }

        if ( i == SBUF_STATS_ARRAY_SIZE - 1 ) {
            LOG( LOG_DEBUG, "ae sbuf arr info: read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
                 p_sbuf_mgr->ae_arr_info.read_idx,
                 p_sbuf_mgr->ae_arr_info.write_idx,
                 p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY],
                 p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_PREPARE],
                 p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_DONE],
                 p_sbuf_mgr->ae_arr_info.item_status_count[SBUF_STATUS_DATA_USING] );
        }
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
        /*** For AWB ***/
        if ( SBUF_STATUS_DATA_USING == p_sbuf_mgr->awb_sbuf_arr[i].buf_status ) {
            p_sbuf_mgr->awb_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
            p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY]++;
            p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_USING]--;
        }

        if ( i == SBUF_STATS_ARRAY_SIZE - 1 ) {
            LOG( LOG_DEBUG, "awb sbuf arr info: read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
                 p_sbuf_mgr->awb_arr_info.read_idx,
                 p_sbuf_mgr->awb_arr_info.write_idx,
                 p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY],
                 p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_PREPARE],
                 p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_DONE],
                 p_sbuf_mgr->awb_arr_info.item_status_count[SBUF_STATUS_DATA_USING] );
        }
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )
        /*** For AF ***/
        if ( SBUF_STATUS_DATA_USING == p_sbuf_mgr->af_sbuf_arr[i].buf_status ) {
            p_sbuf_mgr->af_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
            p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY]++;
            p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_USING]--;
        }

        if ( i == SBUF_STATS_ARRAY_SIZE - 1 ) {
            LOG( LOG_DEBUG, "af sbuf arr info: read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
                 p_sbuf_mgr->af_arr_info.read_idx,
                 p_sbuf_mgr->af_arr_info.write_idx,
                 p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY],
                 p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_PREPARE],
                 p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_DONE],
                 p_sbuf_mgr->af_arr_info.item_status_count[SBUF_STATUS_DATA_USING] );
        }
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
        /*** For Gamma Stats ***/
        if ( SBUF_STATUS_DATA_USING == p_sbuf_mgr->gamma_sbuf_arr[i].buf_status ) {
            p_sbuf_mgr->gamma_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
            p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY]++;
            p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_USING]--;
        }

        if ( i == SBUF_STATS_ARRAY_SIZE - 1 ) {
            LOG( LOG_DEBUG, "gamma sbuf arr info: read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
                 p_sbuf_mgr->gamma_arr_info.read_idx,
                 p_sbuf_mgr->gamma_arr_info.write_idx,
                 p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY],
                 p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_PREPARE],
                 p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_DONE],
                 p_sbuf_mgr->gamma_arr_info.item_status_count[SBUF_STATUS_DATA_USING] );
        }
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
        /*** For Iridix ***/
        if ( SBUF_STATUS_DATA_USING == p_sbuf_mgr->iridix_sbuf_arr[i].buf_status ) {
            p_sbuf_mgr->iridix_sbuf_arr[i].buf_status = SBUF_STATUS_DATA_EMPTY;
            p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY]++;
            p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_USING]--;
        }

        if ( i == SBUF_STATS_ARRAY_SIZE - 1 ) {
            LOG( LOG_DEBUG, "iridix sbuf arr info: read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
                 p_sbuf_mgr->iridix_arr_info.read_idx,
                 p_sbuf_mgr->iridix_arr_info.write_idx,
                 p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_EMPTY],
                 p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_PREPARE],
                 p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_DONE],
                 p_sbuf_mgr->iridix_arr_info.item_status_count[SBUF_STATUS_DATA_USING] );
        }
#endif
    }

    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );
}

static int sbuf_mgr_free( struct sbuf_mgr *p_sbuf_mgr )
{
    unsigned long irq_flags;

    if ( !is_sbuf_inited( p_sbuf_mgr ) ) {
        LOG( LOG_ERR, "Error: sbuf alloc is not inited, can't free." );
        return -ENOMEM;
    }

    /* clear the flag before we free, in case somebody uses the buffer after the free but before the flag is cleared. */
    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );
    p_sbuf_mgr->sbuf_inited = 0;
    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );

    if ( p_sbuf_mgr->buf_allocated ) {
        int i;
        /* clear the reserved flag before freeing so that the kernel doesn't complain when the pages are freed */
        for ( i = 0; i < p_sbuf_mgr->len_used; i += PAGE_SIZE ) {
            ClearPageReserved( virt_to_page( p_sbuf_mgr->buf_used + i ) );
        }

        kfree( p_sbuf_mgr->buf_allocated );
        p_sbuf_mgr->buf_allocated = NULL;
        p_sbuf_mgr->buf_used = NULL;
    } else {
        LOG( LOG_ERR, "Error: sbuf allocated memory is NULL." );
    }

    return 0;
}

/* This function is called when the FSM receives the ae_stats_data_ready event */
void sbuf_update_ae_idx( sbuf_fsm_t *p_fsm )
{
    int rc = 0;
    struct sbuf_item sbuf;
#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    struct sbuf_item sbuf_iridix;
#endif
    struct sbuf_context *p_ctx = NULL;
    uint32_t fw_id = p_fsm->cmn.ctx_id;

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_CRIT, "Fatal error: Invalid FW context ID: %d, max is: %d", fw_id, acamera_get_context_number() - 1 );
        return;
    }

    p_ctx = &( sbuf_contexts[fw_id] );

    /* whether or not the UF is running, we need to drain stats from the queue;
       otherwise, when the UF launches, it will not get the latest stats */
    memset( &sbuf, 0, sizeof( sbuf ) );
    sbuf.buf_status = SBUF_STATUS_DATA_DONE;
    sbuf.buf_type = SBUF_TYPE_AE;

    if ( sbuf_get_item( p_ctx->fw_id, &sbuf ) ) {
        LOG( LOG_ERR, "Failed to get sbuf for ae done buffer, fw_id: %d.", p_ctx->fw_id );
        return;
    }

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    /* NOTE: Iridix is handled here as well because it depends on the AE stats data */
    memset( &sbuf_iridix, 0, sizeof( sbuf_iridix ) );
    sbuf_iridix.buf_status = SBUF_STATUS_DATA_DONE;
    sbuf_iridix.buf_type = SBUF_TYPE_IRIDIX;

    if ( sbuf_get_item( p_ctx->fw_id, &sbuf_iridix ) ) {
        LOG( LOG_DEBUG, "Failed to get sbuf for iridix done buffer, fw_id: %d.", p_ctx->fw_id );

        // return the AE sbuf to the sbuf_manager when an error happens.
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );

        return;
    }
#endif

    if ( !p_ctx->dev_opened || p_fsm->is_paused ) {
        LOG( LOG_DEBUG, "device is not opened or paused, skip, fw_id: %d.", fw_id );
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
        sbuf_iridix.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf_iridix );
#endif
        return;
    }


    rc = mutex_lock_interruptible( &p_ctx->idx_set_lock );
    if ( rc ) {
        LOG( LOG_ERR, "Error: access lock failed, rc: %d.", rc );
        // return this sbuf to the sbuf_manager when an error happens.
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
        sbuf_iridix.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf_iridix );
#endif
        return;
    }

    p_ctx->idx_set.ae_idx = sbuf.buf_idx;
    p_ctx->idx_set.ae_idx_valid = 1;

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    p_ctx->idx_set.iridix_idx = sbuf_iridix.buf_idx;
    p_ctx->idx_set.iridix_idx_valid = 1;

    // Iridix depends on the AE stats data
    sbuf_ae_t *p_sbuf_ae;
    sbuf_iridix_t *p_sbuf_iridix;

    p_sbuf_ae = (sbuf_ae_t *)&( p_ctx->sbuf_mgr.sbuf_base->ae_sbuf[sbuf.buf_idx] );
    p_sbuf_iridix = (sbuf_iridix_t *)&( p_ctx->sbuf_mgr.sbuf_base->iridix_sbuf[sbuf_iridix.buf_idx] );

    p_sbuf_iridix->frame_id = p_sbuf_ae->frame_id;

    acamera_fsm_mgr_set_param( p_fsm->cmn.p_fsm_mgr, FSM_PARAM_SET_IRIDIX_FRAME_ID, &p_sbuf_iridix->frame_id, sizeof( p_sbuf_iridix->frame_id ) );
#endif

    mutex_unlock( &p_ctx->idx_set_lock );

    wake_up_interruptible( &p_ctx->idx_set_wait_queue );
}
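
/*
 * The three update helpers below (AWB/AF/Gamma) follow the same pattern as
 * sbuf_update_ae_idx() above: take a DATA_DONE item of their type, return it
 * as DATA_EMPTY if user space can't consume it right now, otherwise publish
 * its index in idx_set and wake up the reader blocked in sbuf_fops_read().
 * A rough sequence for one AE frame:
 *
 *   ISR/FSM:  get EMPTY -> fill stats -> set DONE
 *   this fn:  get DONE (item becomes USING), idx_set.ae_idx = idx, wake reader
 *   UF:       read() the idx_set, process stats, write() new parameters
 *   driver:   apply parameters, set the item back to EMPTY
 */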

void sbuf_update_awb_idx( sbuf_fsm_t *p_fsm )
{
    int rc = 0;
    struct sbuf_item sbuf;
    struct sbuf_context *p_ctx = NULL;
    uint32_t fw_id = p_fsm->cmn.ctx_id;

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_CRIT, "Fatal error: Invalid FW context ID: %d, max is: %d", fw_id, acamera_get_context_number() - 1 );
        return;
    }

    p_ctx = &( sbuf_contexts[fw_id] );

    /* whether or not the UF is running, we need to drain stats from the queue;
       otherwise, when the UF launches, it will not get the latest stats */
    memset( &sbuf, 0, sizeof( sbuf ) );
    sbuf.buf_status = SBUF_STATUS_DATA_DONE;
    sbuf.buf_type = SBUF_TYPE_AWB;

    if ( sbuf_get_item( p_ctx->fw_id, &sbuf ) ) {
        LOG( LOG_DEBUG, "Failed to get sbuf for awb done buffer, fw_id: %d.", p_ctx->fw_id );
        return;
    }


    if ( !p_ctx->dev_opened || p_fsm->is_paused ) {
        LOG( LOG_DEBUG, "device is not opened or paused, skip, fw_id: %d.", fw_id );
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );
        return;
    }


    rc = mutex_lock_interruptible( &p_ctx->idx_set_lock );
    if ( rc ) {
        LOG( LOG_ERR, "Error: access lock failed, rc: %d.", rc );
        // return this sbuf to the sbuf_manager when an error happens.
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );
        return;
    }

    p_ctx->idx_set.awb_idx = sbuf.buf_idx;
    p_ctx->idx_set.awb_idx_valid = 1;

    mutex_unlock( &p_ctx->idx_set_lock );

    wake_up_interruptible( &p_ctx->idx_set_wait_queue );
}

void sbuf_update_af_idx( sbuf_fsm_t *p_fsm )
{
    int rc = 0;
    struct sbuf_item sbuf;
    struct sbuf_context *p_ctx = NULL;
    uint32_t fw_id = p_fsm->cmn.ctx_id;

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_CRIT, "Fatal error: Invalid FW context ID: %d, max is: %d", fw_id, acamera_get_context_number() - 1 );
        return;
    }

    p_ctx = &( sbuf_contexts[fw_id] );

    /* whether or not the UF is running, we need to drain stats from the queue;
       otherwise, when the UF launches, it will not get the latest stats */
    memset( &sbuf, 0, sizeof( sbuf ) );
    sbuf.buf_status = SBUF_STATUS_DATA_DONE;
    sbuf.buf_type = SBUF_TYPE_AF;

    if ( sbuf_get_item( p_ctx->fw_id, &sbuf ) ) {
        LOG( LOG_DEBUG, "Failed to get sbuf for af done buffer, fw_id: %d.", p_ctx->fw_id );
        return;
    }

    if ( !p_ctx->dev_opened || p_fsm->is_paused ) {
        LOG( LOG_DEBUG, "device is not opened or paused, skip, fw_id: %d.", fw_id );
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );
        return;
    }


    rc = mutex_lock_interruptible( &p_ctx->idx_set_lock );
    if ( rc ) {
        LOG( LOG_ERR, "Error: access lock failed, rc: %d.", rc );
        // return this sbuf to the sbuf_manager when an error happens.
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );
        return;
    }

    p_ctx->idx_set.af_idx = sbuf.buf_idx;
    p_ctx->idx_set.af_idx_valid = 1;

    mutex_unlock( &p_ctx->idx_set_lock );

    wake_up_interruptible( &p_ctx->idx_set_wait_queue );
}

void sbuf_update_gamma_idx( sbuf_fsm_t *p_fsm )
{
    int rc = 0;
    struct sbuf_item sbuf;
    struct sbuf_context *p_ctx = NULL;
    uint32_t fw_id = p_fsm->cmn.ctx_id;

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_CRIT, "Fatal error: Invalid FW context ID: %d, max is: %d", fw_id, acamera_get_context_number() - 1 );
        return;
    }

    p_ctx = &( sbuf_contexts[fw_id] );

    /* whether or not the UF is running, we need to drain stats from the queue;
       otherwise, when the UF launches, it will not get the latest stats */
    memset( &sbuf, 0, sizeof( sbuf ) );
    sbuf.buf_status = SBUF_STATUS_DATA_DONE;
    sbuf.buf_type = SBUF_TYPE_GAMMA;

    if ( sbuf_get_item( p_ctx->fw_id, &sbuf ) ) {
        LOG( LOG_DEBUG, "Failed to get sbuf for gamma done buffer, fw_id: %d.", p_ctx->fw_id );
        return;
    }

    if ( !p_ctx->dev_opened || p_fsm->is_paused ) {
        LOG( LOG_DEBUG, "device is not opened or paused, skip, fw_id: %d.", fw_id );
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );
        return;
    }


    rc = mutex_lock_interruptible( &p_ctx->idx_set_lock );
    if ( rc ) {
        LOG( LOG_ERR, "Error: access lock failed, rc: %d.", rc );
        // return this sbuf to the sbuf_manager when an error happens.
        sbuf.buf_status = SBUF_STATUS_DATA_EMPTY;
        sbuf_set_item( p_ctx->fw_id, &sbuf );
        return;
    }

    p_ctx->idx_set.gamma_idx = sbuf.buf_idx;
    p_ctx->idx_set.gamma_idx_valid = 1;

    mutex_unlock( &p_ctx->idx_set_lock );

    wake_up_interruptible( &p_ctx->idx_set_wait_queue );
}

static uint32_t sbuf_is_ready_to_send_data( struct sbuf_context *p_ctx )
{
    uint32_t rc = 1;

    // If the sbuf FSM is paused or the UF hasn't fetched the calibration data yet,
    // we need to wait before sending stats data.
    if ( p_ctx->p_fsm->is_paused ||
         !p_ctx->sbuf_mgr.sbuf_base->kf_info.cali_info.is_fetched ) {
        LOG( LOG_INFO, "is_paused: %d, is_fetched: %d.", p_ctx->p_fsm->is_paused, p_ctx->sbuf_mgr.sbuf_base->kf_info.cali_info.is_fetched );

        rc = 0;
    }

    return rc;
}

static void sbuf_recycle_idx_set( struct sbuf_context *p_ctx, struct sbuf_idx_set *p_idx_set )
{
    struct sbuf_item item;

    if ( p_idx_set->ae_idx_valid ) {
        item.buf_idx = p_idx_set->ae_idx;
        item.buf_type = SBUF_TYPE_AE;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }

    if ( p_idx_set->awb_idx_valid ) {
        item.buf_idx = p_idx_set->awb_idx;
        item.buf_type = SBUF_TYPE_AWB;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }

    if ( p_idx_set->af_idx_valid ) {
        item.buf_idx = p_idx_set->af_idx;
        item.buf_type = SBUF_TYPE_AF;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }

    if ( p_idx_set->gamma_idx_valid ) {
        item.buf_idx = p_idx_set->gamma_idx;
        item.buf_type = SBUF_TYPE_GAMMA;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }

    if ( p_idx_set->iridix_idx_valid ) {
        item.buf_idx = p_idx_set->iridix_idx;
        item.buf_type = SBUF_TYPE_IRIDIX;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }
}

static void sbuf_mgr_get_latest_idx_set( struct sbuf_context *p_ctx, struct sbuf_idx_set *p_idx_set )
{
    int rc;
    uint32_t wait = 0;

    // set all items to invalid by default.
    memset( p_idx_set, 0, sizeof( *p_idx_set ) );

    rc = mutex_lock_interruptible( &p_ctx->idx_set_lock );
    if ( rc ) {
        LOG( LOG_ERR, "Error: access lock failed, rc: %d.", rc );
        return;
    }

    if ( is_idx_set_has_valid_item( &p_ctx->idx_set ) ) {
        *p_idx_set = p_ctx->idx_set;

        // reset the valid flags to prepare for the next read
        memset( &p_ctx->idx_set, 0, sizeof( p_ctx->idx_set ) );
    } else {
        wait = 1;
    }

    mutex_unlock( &p_ctx->idx_set_lock );

    if ( wait ) {
        long time_out_in_jiffies = 30; /* the jiffy length is HW dependent; on x86 Ubuntu it's 4 ms, so 30 jiffies is 120 ms. */

        /* wait for the event */
        LOG( LOG_DEBUG, "wait for data, timeout_in_jiffies: %ld, HZ: %d.", time_out_in_jiffies, HZ );
        rc = wait_event_interruptible_timeout( p_ctx->idx_set_wait_queue, is_idx_set_has_valid_item( &p_ctx->idx_set ), time_out_in_jiffies );
        LOG( LOG_DEBUG, "after timeout, rc: %d, is_idx_set_has_valid_item: %d.", rc, is_idx_set_has_valid_item( &p_ctx->idx_set ) );

        rc = mutex_lock_interruptible( &p_ctx->idx_set_lock );
        if ( rc ) {
            LOG( LOG_ERR, "Error: 2nd access lock failed, rc: %d.", rc );
            return;
        }

        *p_idx_set = p_ctx->idx_set;

        // reset the valid flags to prepare for the next read since we already sent this set to user space
        memset( &p_ctx->idx_set, 0, sizeof( p_ctx->idx_set ) );
        mutex_unlock( &p_ctx->idx_set_lock );
    }
}
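
/*
 * For reference (standard kernel semantics of the call above):
 * wait_event_interruptible_timeout() returns > 0 when the condition became
 * true (the remaining jiffies), 0 on timeout with the condition still false,
 * and -ERESTARTSYS when interrupted by a signal. The code above deliberately
 * ignores that return value and re-checks idx_set under the lock, so a
 * timeout simply hands an all-invalid idx_set back to the caller.
 */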

static void sbuf_mgr_apply_new_param( struct sbuf_context *p_ctx, struct sbuf_idx_set *p_idx_set )
{
    struct sbuf_item item;

#if defined( ISP_HAS_AE_MANUAL_FSM )
    /* AE */
    if ( p_idx_set->ae_idx_valid && ( p_idx_set->ae_idx < SBUF_STATS_ARRAY_SIZE ) ) {
        sbuf_ae_t *p_sbuf_ae;

        p_sbuf_ae = (sbuf_ae_t *)&( p_ctx->sbuf_mgr.sbuf_base->ae_sbuf[p_idx_set->ae_idx] );

        acamera_fsm_mgr_set_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_SET_AE_NEW_PARAM, p_sbuf_ae, sizeof( *p_sbuf_ae ) );

        LOG( LOG_DEBUG, "ctx: %d, AE exposure: %d, exp_ratio: %u.", p_ctx->fw_id, (int)p_sbuf_ae->ae_exposure, (unsigned int)p_sbuf_ae->ae_exposure_ratio );

        /* set this sbuf back to sbuf_mgr */
        item.buf_idx = p_idx_set->ae_idx;
        item.buf_type = SBUF_TYPE_AE;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
    /* AWB */
    if ( p_idx_set->awb_idx_valid && ( p_idx_set->awb_idx < SBUF_STATS_ARRAY_SIZE ) ) {
        sbuf_awb_t *p_sbuf_awb;

        p_sbuf_awb = (sbuf_awb_t *)&( p_ctx->sbuf_mgr.sbuf_base->awb_sbuf[p_idx_set->awb_idx] );

        /* unionman add 2020-01-07: slightly reduce the AWB red/blue gains */
        if ( p_sbuf_awb->awb_red_gain > 60 ) {
            p_sbuf_awb->awb_red_gain -= 60;
        }
        if ( p_sbuf_awb->awb_blue_gain > 60 ) {
            p_sbuf_awb->awb_blue_gain -= 60;
        }
        /* end of unionman change */


        acamera_fsm_mgr_set_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_SET_AWB_NEW_PARAM, p_sbuf_awb, sizeof( *p_sbuf_awb ) );

        LOG( LOG_DEBUG, "ctx: %d, AWB param: awb_red_gain: %u, awb_blue_gain: %u, temperature: %d, light_source: %u, p_high: %u",
             p_ctx->fw_id,
             p_sbuf_awb->awb_red_gain,
             p_sbuf_awb->awb_blue_gain,
             p_sbuf_awb->temperature_detected,
             p_sbuf_awb->light_source_candidate,
             p_sbuf_awb->p_high );

        /* set this sbuf back to sbuf_mgr */
        item.buf_idx = p_idx_set->awb_idx;
        item.buf_type = SBUF_TYPE_AWB;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )

    /* AF */
    if ( p_idx_set->af_idx_valid && ( p_idx_set->af_idx < SBUF_STATS_ARRAY_SIZE ) ) {
        sbuf_af_t *p_sbuf_af;

        p_sbuf_af = (sbuf_af_t *)&( p_ctx->sbuf_mgr.sbuf_base->af_sbuf[p_idx_set->af_idx] );

        acamera_fsm_mgr_set_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_SET_AF_NEW_PARAM, p_sbuf_af, sizeof( *p_sbuf_af ) );

        LOG( LOG_DEBUG, "ctx: %d, AF pos: %u.", p_ctx->fw_id, p_sbuf_af->af_position );

        /* set this sbuf back to sbuf_mgr */
        item.buf_idx = p_idx_set->af_idx;
        item.buf_type = SBUF_TYPE_AF;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
    /* Gamma */
    if ( p_idx_set->gamma_idx_valid && ( p_idx_set->gamma_idx < SBUF_STATS_ARRAY_SIZE ) ) {
        sbuf_gamma_t *p_sbuf_gamma;

        p_sbuf_gamma = (sbuf_gamma_t *)&( p_ctx->sbuf_mgr.sbuf_base->gamma_sbuf[p_idx_set->gamma_idx] );
        acamera_fsm_mgr_set_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_SET_GAMMA_NEW_PARAM, p_sbuf_gamma, sizeof( *p_sbuf_gamma ) );
        LOG( LOG_DEBUG, "ctx: %d, gamma param: gain: %u, offset: %u.", p_ctx->fw_id, p_sbuf_gamma->gamma_gain, p_sbuf_gamma->gamma_offset );

        /* set this sbuf back to sbuf_mgr */
        item.buf_idx = p_idx_set->gamma_idx;
        item.buf_type = SBUF_TYPE_GAMMA;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    /* Iridix */
    if ( p_idx_set->iridix_idx_valid && ( p_idx_set->iridix_idx < SBUF_STATS_ARRAY_SIZE ) ) {
        sbuf_iridix_t *p_sbuf_iridix;

        p_sbuf_iridix = (sbuf_iridix_t *)&( p_ctx->sbuf_mgr.sbuf_base->iridix_sbuf[p_idx_set->iridix_idx] );
        acamera_fsm_mgr_set_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_SET_IRIDIX_NEW_PARAM, p_sbuf_iridix, sizeof( *p_sbuf_iridix ) );
        LOG( LOG_DEBUG, "ctx: %d, iridix strength: %u, iridix_dark_enh: %u.", p_ctx->fw_id, p_sbuf_iridix->strength_target, p_sbuf_iridix->iridix_dark_enh );

        /* set this sbuf back to sbuf_mgr */
        item.buf_idx = p_idx_set->iridix_idx;
        item.buf_type = SBUF_TYPE_IRIDIX;
        item.buf_status = SBUF_STATUS_DATA_EMPTY;

        sbuf_set_item( p_ctx->fw_id, &item );
    }
#endif
}

static int sbuf_get_item_from_arr( struct sbuf_mgr *p_sbuf_mgr, struct sbuf_item *item, struct sbuf_item *arr, struct sbuf_item_arr_info *info )
{
    uint32_t count;
    unsigned long irq_flags;
    int rc = -EINVAL;

    /* status cycle: Empty -> Prepare -> Done -> Using -> Empty */
    if ( ( SBUF_STATUS_DATA_EMPTY != item->buf_status ) &&
         ( SBUF_STATUS_DATA_DONE != item->buf_status ) ) {
        LOG( LOG_ERR, "Invalid sbuf status: %d.", item->buf_status );
        return -EINVAL;
    }

    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );

    LOG( LOG_DEBUG, "+++ sbuf arr info: buf_type: %s, buf_status: %s, read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
         sbuf_type_str[item->buf_type],
         sbuf_status_str[item->buf_status],
         info->read_idx,
         info->write_idx,
         info->item_status_count[SBUF_STATUS_DATA_EMPTY],
         info->item_status_count[SBUF_STATUS_DATA_PREPARE],
         info->item_status_count[SBUF_STATUS_DATA_DONE],
         info->item_status_count[SBUF_STATUS_DATA_USING] );

    /* does the caller want an empty buffer? */
    if ( SBUF_STATUS_DATA_EMPTY == item->buf_status ) {
        /*
         * only one buffer may be preparing data at a time; if a buffer
         * is already in the PREPARE state, we must not hand out another.
         */
        LOG( LOG_DEBUG, "getting a DATA_EMPTY sbuf." );
        if ( info->item_status_count[SBUF_STATUS_DATA_PREPARE] == 0 ) {
            /* get the buffer information */
            item->buf_idx = info->write_idx;
            item->buf_base = arr[item->buf_idx].buf_base;

            /* update array information */
            info->item_status_count[arr[item->buf_idx].buf_status]--;
            arr[item->buf_idx].buf_status = SBUF_STATUS_DATA_PREPARE;
            info->item_status_count[SBUF_STATUS_DATA_PREPARE]++;

            /* prepare the next write index */
            count = info->item_total_count;
            while ( --count ) {
                info->write_idx++;

                if ( info->write_idx >= info->item_total_count )
                    info->write_idx = 0;

                /* find the buffer for the next write; we can't use an sbuf that is currently in use. */
                if ( arr[info->write_idx].buf_status != SBUF_STATUS_DATA_USING ) {
                    /* if we're going to overwrite the next read buffer, we should update the read_idx */
                    if ( info->read_idx == info->write_idx ) {
                        do {
                            info->read_idx++;
                            if ( info->read_idx >= info->item_total_count )
                                info->read_idx = 0;

                            /* check for a whole loop to avoid an infinite loop in case of some error conditions */
                            if ( info->read_idx == info->write_idx ) {
                                LOG( LOG_DEBUG, "no DONE buffer after a loop, reset read_idx." );
                                info->read_idx = info->item_total_count;
                                break;
                            }
                        } while ( arr[info->read_idx].buf_status != SBUF_STATUS_DATA_DONE );
                    }

                    /* update status count */
                    info->item_status_count[arr[info->write_idx].buf_status]--;
                    arr[info->write_idx].buf_status = SBUF_STATUS_DATA_EMPTY;
                    info->item_status_count[SBUF_STATUS_DATA_EMPTY]++;
                    break;
                }
            }

            rc = 0;
        } else {
            LOG( LOG_ERR, "Failed to get empty sbuf, prepare count: %u.",
                 info->item_status_count[SBUF_STATUS_DATA_PREPARE] );
        }
    } else {
        /* the caller wants a data_done buffer */
        /*
         * only one buffer may be using the data at a time; if a buffer
         * is already in use, we must fail this request.
         */
        if ( ( info->item_status_count[SBUF_STATUS_DATA_USING] == 0 ) &&
             ( info->item_status_count[SBUF_STATUS_DATA_DONE] > 0 ) &&
             ( info->read_idx < info->item_total_count ) && /* read_idx may be the invalid sentinel */
             ( SBUF_STATUS_DATA_DONE == arr[info->read_idx].buf_status ) ) {

            /* get the buffer information */
            item->buf_idx = info->read_idx;
            item->buf_base = arr[item->buf_idx].buf_base;

            /* update array information */
            info->item_status_count[arr[item->buf_idx].buf_status]--;
            arr[item->buf_idx].buf_status = SBUF_STATUS_DATA_USING;
            info->item_status_count[SBUF_STATUS_DATA_USING]++;

            /* prepare the next read index */
            count = info->item_total_count;
            while ( --count ) {
                info->read_idx++;

                if ( info->read_idx >= info->item_total_count )
                    info->read_idx = 0;

                /* if we find the next DATA_DONE buffer, break */
                if ( arr[info->read_idx].buf_status == SBUF_STATUS_DATA_DONE )
                    break;
            }

            if ( arr[info->read_idx].buf_status != SBUF_STATUS_DATA_DONE ) {
                info->read_idx = info->item_total_count;
                LOG( LOG_DEBUG, "NOTE: no DONE buffer after a loop, reset read_idx." );
            }

            rc = 0;
        } else {
            LOG( LOG_DEBUG, "Failed to get done sbuf, using count: %u, done count: %u, read_idx: %u, read_idx status: %u",
                 info->item_status_count[SBUF_STATUS_DATA_USING],
                 info->item_status_count[SBUF_STATUS_DATA_DONE],
                 info->read_idx,
                 ( info->read_idx < info->item_total_count ) ? arr[info->read_idx].buf_status : SBUF_STATUS_MAX );
        }
    }

    LOG( LOG_DEBUG, "--- sbuf arr info: buf_type: %s, buf_status: %s, read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
         sbuf_type_str[item->buf_type],
         sbuf_status_str[item->buf_status],
         info->read_idx,
         info->write_idx,
         info->item_status_count[SBUF_STATUS_DATA_EMPTY],
         info->item_status_count[SBUF_STATUS_DATA_PREPARE],
         info->item_status_count[SBUF_STATUS_DATA_DONE],
         info->item_status_count[SBUF_STATUS_DATA_USING] );

    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );

    if ( rc ) {
        LOG( LOG_DEBUG, "Get sbuf item failed: buf_type: %s, buf_status: %s.", sbuf_type_str[item->buf_type], sbuf_status_str[item->buf_status] );
    } else {
        LOG( LOG_DEBUG, "Get sbuf item OK: buf_type: %s, buf_idx: %u, buf_status: %s, buf_base: %p.", sbuf_type_str[item->buf_type], item->buf_idx, sbuf_status_str[item->buf_status], item->buf_base );
    }

    return rc;
}
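
/*
 * Walk-through of the get path above (illustrative, with 4 buffers all
 * DATA_DONE and read_idx == 0): a DONE request returns buffer 0, which
 * becomes USING (done count 4 -> 3), and read_idx advances to 1, the next
 * DONE buffer. If instead no remaining buffer is DONE after the scan loop,
 * read_idx is parked at item_total_count, the "invalid" sentinel, and
 * sbuf_set_item_to_arr() re-seeds it the next time a buffer is marked DONE.
 */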

static int sbuf_set_item_to_arr( struct sbuf_mgr *p_sbuf_mgr, struct sbuf_item *item, struct sbuf_item *arr, struct sbuf_item_arr_info *info )
{
    unsigned long irq_flags;
    int rc = -EINVAL;

    /* status cycle: Empty -> Prepare -> Done -> Using -> Empty */
    if ( ( SBUF_STATUS_DATA_EMPTY != item->buf_status ) &&
         ( SBUF_STATUS_DATA_DONE != item->buf_status ) ) {
        LOG( LOG_ERR, "Invalid state: %d.", item->buf_status );
        return -EINVAL;
    }

    spin_lock_irqsave( &p_sbuf_mgr->sbuf_lock, irq_flags );

    LOG( LOG_DEBUG, "+++ sbuf arr info: buf_type: %s, buf_status: %s, read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
         sbuf_type_str[item->buf_type],
         sbuf_status_str[item->buf_status],
         info->read_idx,
         info->write_idx,
         info->item_status_count[SBUF_STATUS_DATA_EMPTY],
         info->item_status_count[SBUF_STATUS_DATA_PREPARE],
         info->item_status_count[SBUF_STATUS_DATA_DONE],
         info->item_status_count[SBUF_STATUS_DATA_USING] );

    /* does the caller want to return an empty buffer after using it? */
    if ( SBUF_STATUS_DATA_EMPTY == item->buf_status ) {
        /* The previous status of this buffer must be USING */
        if ( ( info->item_status_count[SBUF_STATUS_DATA_USING] == 1 ) &&
             ( arr[item->buf_idx].buf_status == SBUF_STATUS_DATA_USING ) ) {

            /* update array information */
            arr[item->buf_idx].buf_status = SBUF_STATUS_DATA_EMPTY;
            info->item_status_count[SBUF_STATUS_DATA_EMPTY]++;
            info->item_status_count[SBUF_STATUS_DATA_USING]--;

            rc = 0;
        } else {
            LOG( LOG_ERR, "Failed to set empty sbuf, using count: %u, item buf_status: %u, buf_idx: %u.",
                 info->item_status_count[SBUF_STATUS_DATA_USING],
                 arr[item->buf_idx].buf_status,
                 item->buf_idx );
        }
    } else {
        /* the caller wants to mark the buffer data_done after preparing it */
        /* The previous status of this buffer must be PREPARE */
        if ( ( info->item_status_count[SBUF_STATUS_DATA_PREPARE] == 1 ) &&
             ( arr[item->buf_idx].buf_status == SBUF_STATUS_DATA_PREPARE ) ) {

            /* update array information */
            arr[item->buf_idx].buf_status = SBUF_STATUS_DATA_DONE;
            info->item_status_count[SBUF_STATUS_DATA_DONE]++;
            info->item_status_count[SBUF_STATUS_DATA_PREPARE]--;

            /* set read_idx if it hasn't been set properly */
            if ( info->read_idx == info->item_total_count ) {
                info->read_idx = item->buf_idx;
            }

            rc = 0;
        } else {
            LOG( LOG_ERR, "Failed to set done sbuf, prepare count: %u, item buf_status: %u, buf_idx: %u.",
                 info->item_status_count[SBUF_STATUS_DATA_PREPARE],
                 arr[item->buf_idx].buf_status,
                 item->buf_idx );
        }
    }

    LOG( LOG_DEBUG, "--- sbuf arr info: buf_type: %s, buf_status: %u, read_idx: %u, write_idx: %u, status_count: %u-%u-%u-%u.",
         sbuf_type_str[item->buf_type],
         item->buf_status,
         info->read_idx,
         info->write_idx,
         info->item_status_count[SBUF_STATUS_DATA_EMPTY],
         info->item_status_count[SBUF_STATUS_DATA_PREPARE],
         info->item_status_count[SBUF_STATUS_DATA_DONE],
         info->item_status_count[SBUF_STATUS_DATA_USING] );

    spin_unlock_irqrestore( &p_sbuf_mgr->sbuf_lock, irq_flags );

    if ( rc ) {
        LOG( LOG_ERR, "Set item failed: buf_idx: %u, buf_type: %s, buf_status: %s, buf_base: %p.", item->buf_idx, sbuf_type_str[item->buf_type], sbuf_status_str[item->buf_status], item->buf_base );
    } else {
        LOG( LOG_DEBUG, "Set item OK: buf_type: %s, buf_idx: %u, buf_status: %s, buf_base: %p.", sbuf_type_str[item->buf_type], item->buf_idx, sbuf_status_str[item->buf_status], item->buf_base );
    }

    return rc;
}

int sbuf_get_item( int fw_id, struct sbuf_item *item )
{
    int rc;
    struct sbuf_mgr *p_sbuf_mgr;

    if ( !item ) {
        LOG( LOG_ERR, "Error: Invalid param, item is NULL." );
        return -EINVAL;
    }

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_ERR, "Error: Invalid param, fw_id: %d, max is: %d.", fw_id, acamera_get_context_number() - 1 );
        return -EINVAL;
    }

    LOG( LOG_DEBUG, "fw_id: %d.", fw_id );

    p_sbuf_mgr = &( sbuf_contexts[fw_id].sbuf_mgr );
    if ( !is_sbuf_inited( p_sbuf_mgr ) ) {
        LOG( LOG_ERR, "Error: sbuf is not inited, can't get_item." );
        return -ENOMEM;
    }

    switch ( item->buf_type ) {
#if defined( ISP_HAS_AE_MANUAL_FSM )
    case SBUF_TYPE_AE:
        rc = sbuf_get_item_from_arr( p_sbuf_mgr, item, p_sbuf_mgr->ae_sbuf_arr, &p_sbuf_mgr->ae_arr_info );
        break;
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
    case SBUF_TYPE_AWB:
        rc = sbuf_get_item_from_arr( p_sbuf_mgr, item, p_sbuf_mgr->awb_sbuf_arr, &p_sbuf_mgr->awb_arr_info );
        break;
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )
    case SBUF_TYPE_AF:
        rc = sbuf_get_item_from_arr( p_sbuf_mgr, item, p_sbuf_mgr->af_sbuf_arr, &p_sbuf_mgr->af_arr_info );
        break;
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
    case SBUF_TYPE_GAMMA:
        rc = sbuf_get_item_from_arr( p_sbuf_mgr, item, p_sbuf_mgr->gamma_sbuf_arr, &p_sbuf_mgr->gamma_arr_info );
        break;
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    case SBUF_TYPE_IRIDIX:
        rc = sbuf_get_item_from_arr( p_sbuf_mgr, item, p_sbuf_mgr->iridix_sbuf_arr, &p_sbuf_mgr->iridix_arr_info );
        break;
#endif
    default:
        LOG( LOG_ERR, "Error: Unsupported buf_type: %d.", item->buf_type );
        rc = -EINVAL;
        break;
    }

    return rc;
}
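
/*
 * Minimal kernel-side usage sketch (illustrative only; it mirrors what the
 * sbuf_update_*_idx() helpers above do for real; fill_ae_stats() is a
 * hypothetical filler, not a function in this driver). A stats producer would:
 *
 *   struct sbuf_item item = {0};
 *   item.buf_type = SBUF_TYPE_AE;
 *   item.buf_status = SBUF_STATUS_DATA_EMPTY;         // ask for an EMPTY slot
 *   if ( sbuf_get_item( fw_id, &item ) == 0 ) {        // item is now PREPARE
 *       fill_ae_stats( (sbuf_ae_t *)item.buf_base );   // hypothetical
 *       item.buf_status = SBUF_STATUS_DATA_DONE;       // publish it
 *       sbuf_set_item( fw_id, &item );
 *   }
 */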

int sbuf_set_item( int fw_id, struct sbuf_item *item )
{
    int rc;
    struct sbuf_mgr *p_sbuf_mgr;

    if ( !item ) {
        LOG( LOG_ERR, "Error: Invalid param, item is NULL." );
        return -EINVAL;
    }

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_ERR, "Error: Invalid param, fw_id: %d, max is: %d.", fw_id, acamera_get_context_number() - 1 );
        return -EINVAL;
    }

    LOG( LOG_DEBUG, "fw_id: %d.", fw_id );

    p_sbuf_mgr = &( sbuf_contexts[fw_id].sbuf_mgr );
    if ( !is_sbuf_inited( p_sbuf_mgr ) ) {
        LOG( LOG_ERR, "Error: sbuf is not inited, can't set_item." );
        return -ENOMEM;
    }

    switch ( item->buf_type ) {
#if defined( ISP_HAS_AE_MANUAL_FSM )
    case SBUF_TYPE_AE:
        rc = sbuf_set_item_to_arr( p_sbuf_mgr, item, p_sbuf_mgr->ae_sbuf_arr, &p_sbuf_mgr->ae_arr_info );
        break;
#endif

#if defined( ISP_HAS_AWB_MANUAL_FSM )
    case SBUF_TYPE_AWB:
        rc = sbuf_set_item_to_arr( p_sbuf_mgr, item, p_sbuf_mgr->awb_sbuf_arr, &p_sbuf_mgr->awb_arr_info );
        break;
#endif

#if defined( ISP_HAS_AF_MANUAL_FSM )
    case SBUF_TYPE_AF:
        rc = sbuf_set_item_to_arr( p_sbuf_mgr, item, p_sbuf_mgr->af_sbuf_arr, &p_sbuf_mgr->af_arr_info );
        break;
#endif

#if defined( ISP_HAS_GAMMA_MANUAL_FSM )
    case SBUF_TYPE_GAMMA:
        rc = sbuf_set_item_to_arr( p_sbuf_mgr, item, p_sbuf_mgr->gamma_sbuf_arr, &p_sbuf_mgr->gamma_arr_info );
        break;
#endif

#if defined( ISP_HAS_IRIDIX_MANUAL_FSM ) || defined( ISP_HAS_IRIDIX8_MANUAL_FSM )
    case SBUF_TYPE_IRIDIX:
        rc = sbuf_set_item_to_arr( p_sbuf_mgr, item, p_sbuf_mgr->iridix_sbuf_arr, &p_sbuf_mgr->iridix_arr_info );
        break;
#endif
    default:
        LOG( LOG_ERR, "Error: Unsupported buf_type: %d.", item->buf_type );
        rc = -EINVAL;
        break;
    }

    return rc;
}

static int sbuf_fops_open( struct inode *inode, struct file *f )
{
    int rc;
    int i;
    struct sbuf_context *p_ctx = NULL;
    int minor = iminor( inode );

    for ( i = 0; i < acamera_get_context_number(); i++ ) {
        if ( sbuf_contexts[i].dev_minor_id == minor ) {
            p_ctx = &sbuf_contexts[i];
            break;
        }
    }

    if ( !p_ctx ) {
        LOG( LOG_CRIT, "Fatal error: the sbuf contexts are corrupted, dumping contents:" );
        for ( i = 0; i < acamera_get_context_number(); i++ ) {
            p_ctx = &sbuf_contexts[i];
            LOG( LOG_CRIT, "%d): fw_id: %d, minor_id: %d, name: %s, p_fsm: %p.",
                 i, p_ctx->fw_id, p_ctx->dev_minor_id, p_ctx->dev_name, p_ctx->p_fsm );
        }
        return -ERESTARTSYS;
    }

    rc = mutex_lock_interruptible( &p_ctx->fops_lock );
    if ( rc ) {
        LOG( LOG_ERR, "access lock failed, rc: %d.", rc );
        return rc;
    }

    if ( p_ctx->dev_opened ) {
        LOG( LOG_ERR, "open failed, already opened." );
        rc = -EBUSY;
    } else {
        p_ctx->dev_opened = 1;
        rc = 0;
        f->private_data = p_ctx;
    }

    mutex_unlock( &p_ctx->fops_lock );

    return rc;
}

static int sbuf_fops_release( struct inode *inode, struct file *f )
{
    int rc = 0;
    struct sbuf_context *p_ctx = (struct sbuf_context *)f->private_data;

    rc = mutex_lock_interruptible( &p_ctx->fops_lock );
    if ( rc ) {
        LOG( LOG_ERR, "Error: lock failed." );
        return rc;
    }

    if ( p_ctx->dev_opened ) {
        p_ctx->dev_opened = 0;
        f->private_data = NULL;
        sbuf_mgr_reset( &p_ctx->sbuf_mgr );
    } else {
        LOG( LOG_CRIT, "Fatal error: wrong state dev_opened: %d.", p_ctx->dev_opened );
        rc = -EINVAL;
    }

    mutex_unlock( &p_ctx->fops_lock );

    return rc;
}

static ssize_t sbuf_fops_write( struct file *file, const char __user *buf, size_t count, loff_t *ppos )
{
    int rc = 0;
    struct sbuf_context *p_ctx = (struct sbuf_context *)file->private_data;
    struct sbuf_idx_set idx_set = {0};
    uint32_t len_to_copy = sizeof( struct sbuf_idx_set );

    LOG( LOG_DEBUG, "p_ctx: %p, name: %s, fw_id: %d, minor_id: %d.", p_ctx, p_ctx->dev_name, p_ctx->fw_id, p_ctx->dev_minor_id );

    if ( count != len_to_copy ) {
        LOG( LOG_ERR, "write size mismatch, size: %u, expected: %u.", (uint32_t)count, len_to_copy );
        return -EINVAL;
    }

    rc = copy_from_user( &idx_set, buf, len_to_copy );
    if ( rc ) {
        LOG( LOG_ERR, "copy_from_user failed, not copied: %d, expected: %u.", rc, len_to_copy );
        /* don't apply a partially copied idx_set */
        return -EFAULT;
    }

    LOG( LOG_DEBUG, "ctx: %d, write idx_set: %u(%u)-%u(%u)-%u(%u)-%u(%u)-%u(%u).",
         p_ctx->fw_id,
         idx_set.ae_idx_valid, idx_set.ae_idx,
         idx_set.awb_idx_valid, idx_set.awb_idx,
         idx_set.af_idx_valid, idx_set.af_idx,
         idx_set.gamma_idx_valid, idx_set.gamma_idx,
         idx_set.iridix_idx_valid, idx_set.iridix_idx );

    sbuf_mgr_apply_new_param( p_ctx, &idx_set );

    return len_to_copy;
}
1616
static ssize_t sbuf_fops_read( struct file *file, char __user *buf, size_t count, loff_t *ppos )
{
    int rc;
    int32_t max_exposure_log2 = 0;
    struct sbuf_context *p_ctx = (struct sbuf_context *)file->private_data;
    struct sbuf_idx_set idx_set;
    uint32_t len_to_copy = sizeof( struct sbuf_idx_set );

    if ( count != len_to_copy ) {
        LOG( LOG_ERR, "read size mismatch, size: %u, expected: %u.", (uint32_t)count, len_to_copy );
        return -EINVAL;
    }

    if ( !sbuf_is_ready_to_send_data( p_ctx ) ) {
        LOG( LOG_INFO, "Not ready to send data." );
        return -ENODATA;
    }

    /* Get the latest sbuf index set; this waits if no data is available. */
    sbuf_mgr_get_latest_idx_set( p_ctx, &idx_set );

    /* Check again because sbuf_mgr_get_latest_idx_set() may have slept waiting for data. */
    if ( !sbuf_is_ready_to_send_data( p_ctx ) ) {
        /* recycle items to sbuf_mgr */
        sbuf_recycle_idx_set( p_ctx, &idx_set );

        LOG( LOG_INFO, "Not ready to send data." );
        return -ENODATA;
    }

    rc = copy_to_user( buf, &idx_set, len_to_copy );
    if ( rc ) {
        LOG( LOG_ERR, "copy_to_user failed, not copied: %d.", rc );
    }

    LOG( LOG_INFO, "ctx: %d, read idx_set: %u(%u)-%u(%u)-%u(%u)-%u(%u)-%u(%u).",
         p_ctx->fw_id,
         idx_set.ae_idx_valid, idx_set.ae_idx,
         idx_set.awb_idx_valid, idx_set.awb_idx,
         idx_set.af_idx_valid, idx_set.af_idx,
         idx_set.gamma_idx_valid, idx_set.gamma_idx,
         idx_set.iridix_idx_valid, idx_set.iridix_idx );

    int32_t type = CMOS_MAX_EXPOSURE_LOG2;
    acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_CMOS_EXPOSURE_LOG2, &type, sizeof( type ), &max_exposure_log2, sizeof( max_exposure_log2 ) );
    p_ctx->sbuf_mgr.sbuf_base->kf_info.cmos_info.max_exposure_log2 = max_exposure_log2;
    LOG( LOG_DEBUG, "fw_id: %d, set cmos_info max_exposure_log2: %d.", p_ctx->fw_id, max_exposure_log2 );

    int32_t total_gain_log2 = 0;
    acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_CMOS_TOTAL_GAIN, NULL, 0, &total_gain_log2, sizeof( total_gain_log2 ) );
    p_ctx->sbuf_mgr.sbuf_base->kf_info.cmos_info.total_gain_log2 = total_gain_log2;

    return rc ? -EFAULT : len_to_copy;
}


static int sbuf_fops_mmap( struct file *file, struct vm_area_struct *vma )
{
    unsigned long user_buf_len = vma->vm_end - vma->vm_start;
    int rc;
    int32_t max_exposure_log2 = 0;
    struct sbuf_context *p_ctx = (struct sbuf_context *)file->private_data;
    struct sbuf_mgr *p_sbuf_mgr = &p_ctx->sbuf_mgr;

    /*
     * user_buf_len will be page aligned even if struct fw_sbuf is not,
     * so the two sizes may differ, but the delta must be less than
     * one page.
     */
    if ( ( user_buf_len != sizeof( struct fw_sbuf ) ) &&
         ( user_buf_len - sizeof( struct fw_sbuf ) >= PAGE_SIZE ) ) {
        LOG( LOG_CRIT, "Buffer size mismatch, user app size: %lu, kernel sbuf size: %zu.", user_buf_len, sizeof( struct fw_sbuf ) );
        return -EINVAL;
    }
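    /*
     * Worked example with hypothetical sizes: if sizeof( struct fw_sbuf )
     * were 0x610 bytes, mmap() would round the mapping up to
     * PAGE_ALIGN( 0x610 ) == 0x1000 with 4 KiB pages, so a user_buf_len of
     * 0x1000 passes (delta 0x9F0 < PAGE_SIZE) while 0x2000 is rejected.
     * Because user_buf_len is unsigned, a mapping shorter than
     * struct fw_sbuf underflows the subtraction to a huge value and is
     * rejected as well.
     */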

    if ( !is_sbuf_inited( p_sbuf_mgr ) ) {
        LOG( LOG_ERR, "Error: sbuf is not inited, can't map." );
        return -ENOMEM;
    }

    /* remap the kernel buffer into the user app address space. */
    rc = remap_pfn_range( vma, vma->vm_start, virt_to_phys( p_sbuf_mgr->buf_used ) >> PAGE_SHIFT, user_buf_len, vma->vm_page_prot );
    if ( rc < 0 ) {
        LOG( LOG_ERR, "remap of sbuf failed, return: %d.", rc );
        return rc;
    }

1703
1704 /* prepare cmos_info */
1705 int32_t type = CMOS_MAX_EXPOSURE_LOG2;
1706 acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_CMOS_EXPOSURE_LOG2, &type, sizeof( type ), &max_exposure_log2, sizeof( max_exposure_log2 ) );
1707 p_sbuf_mgr->sbuf_base->kf_info.cmos_info.max_exposure_log2 = max_exposure_log2;
1708 LOG( LOG_INFO, "fw_id: %d, set cmos_info max_exposure_log2: %d.", p_ctx->fw_id, max_exposure_log2 );
1709
1710 /* prepare lens_info */
1711 int32_t lens_status = 0;
1712 acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_AF_LENS_STATUS, NULL, 0, &lens_status, sizeof( lens_status ) );
1713 p_sbuf_mgr->sbuf_base->kf_info.af_info.lens_driver_ok = lens_status;
1714 LOG( LOG_INFO, "fw_id: %d, set af_info lens_driver_ok: %d.", p_ctx->fw_id, lens_status );
1715
1716 if ( lens_status ) {
1717 lens_param_t lens_param;
1718 acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_LENS_PARAM, NULL, 0, &lens_param, sizeof( lens_param ) );
1719 p_sbuf_mgr->sbuf_base->kf_info.af_info.lens_param = lens_param;
1720 }
1721
1722 /* prepare sensor_info */
1723 const sensor_param_t *param = NULL;
1724 acamera_fsm_mgr_get_param( p_ctx->p_fsm->cmn.p_fsm_mgr, FSM_PARAM_GET_SENSOR_PARAM, NULL, 0, ¶m, sizeof( param ) );
1725
    if ( param ) {
        uint32_t idx = 0;
        uint32_t valid_modes_num = 0;
        valid_modes_num = param->modes_num;

        if ( valid_modes_num > ISP_MAX_SENSOR_MODES ) {
            valid_modes_num = ISP_MAX_SENSOR_MODES;
        }

        for ( idx = 0; idx < valid_modes_num; idx++ ) {
            p_sbuf_mgr->sbuf_base->kf_info.sensor_info.modes[idx] = param->modes_table[idx];

            LOG( LOG_DEBUG, "Sensor_mode[%d]: wdr_mode: %d, exp: %d.", idx,
                 p_sbuf_mgr->sbuf_base->kf_info.sensor_info.modes[idx].wdr_mode,
                 p_sbuf_mgr->sbuf_base->kf_info.sensor_info.modes[idx].exposures );
        }

        p_sbuf_mgr->sbuf_base->kf_info.sensor_info.cur_mode = param->mode;
        p_sbuf_mgr->sbuf_base->kf_info.sensor_info.modes_num = valid_modes_num;
    }

    return 0;
}

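/*
 * Illustrative user-space flow (a sketch, not shipped code): a 3A daemon
 * would typically open the per-context device node registered below, mmap
 * the shared fw_sbuf area, then loop read()/write() on struct sbuf_idx_set
 * to borrow and return stats/parameter buffers. The device path is an
 * assumption here; the real node name comes from SBUF_DEV_FORMAT.
 *
 *     int fd = open( "/dev/<sbuf-dev>0", O_RDWR );
 *     struct fw_sbuf *sbuf = mmap( NULL, sizeof( struct fw_sbuf ),
 *                                  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 );
 *     struct sbuf_idx_set idx_set;
 *
 *     while ( read( fd, &idx_set, sizeof( idx_set ) ) == sizeof( idx_set ) ) {
 *         // consume stats / produce new params in *sbuf via the indexes
 *         // flagged valid in idx_set, then hand the items back:
 *         write( fd, &idx_set, sizeof( idx_set ) );
 *     }
 */
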
static const struct file_operations sbuf_mgr_fops = {
    .owner = THIS_MODULE,
    .open = sbuf_fops_open,
    .release = sbuf_fops_release,
    .read = sbuf_fops_read,
    .write = sbuf_fops_write,
    .llseek = noop_llseek,
    .mmap = sbuf_fops_mmap,
};

void sbuf_fsm_initialize( sbuf_fsm_t *p_fsm )
{
    int rc;
    uint32_t fw_id = p_fsm->cmn.ctx_id;
    struct miscdevice *p_dev;
    struct sbuf_context *p_ctx;

    if ( fw_id >= acamera_get_context_number() ) {
        LOG( LOG_CRIT, "Fatal error: Invalid FW context ID: %d, max is: %d", fw_id, acamera_get_context_number() - 1 );
        return;
    }

    p_ctx = &( sbuf_contexts[fw_id] );
    memset( p_ctx, 0, sizeof( *p_ctx ) );
    p_dev = &p_ctx->sbuf_dev;
    snprintf( p_ctx->dev_name, SBUF_DEV_NAME_LEN, SBUF_DEV_FORMAT, fw_id );
    p_dev->name = p_ctx->dev_name;
    p_dev->minor = MISC_DYNAMIC_MINOR;
    p_dev->fops = &sbuf_mgr_fops;

    rc = misc_register( p_dev );
    if ( rc ) {
        LOG( LOG_ERR, "init failed, error: register sbuf device failed, ret: %d.", rc );
        return;
    }

    p_ctx->fw_id = fw_id;
    p_ctx->dev_minor_id = p_dev->minor;
    p_ctx->p_fsm = p_fsm;
    p_ctx->dev_opened = 0;

    rc = sbuf_ctx_init( p_ctx );
    if ( rc ) {
        LOG( LOG_ERR, "init failed, ret: %d.", rc );

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
        misc_deregister( p_dev );
#else
        rc = misc_deregister( p_dev );
        if ( rc ) {
            LOG( LOG_ERR, "deregister sbuf dev '%s' failed, ret: %d.", p_dev->name, rc );
        } else {
            LOG( LOG_INFO, "deregister sbuf dev '%s' ok.", p_dev->name );
        }
#endif
        p_dev->name = NULL;

        return;
    }

    mutex_init( &p_ctx->fops_lock );
    mutex_init( &p_ctx->idx_set_lock );
    init_waitqueue_head( &p_ctx->idx_set_wait_queue );

    return;
}

void sbuf_deinit( sbuf_fsm_ptr_t p_fsm )
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
    int rc;
#endif
    uint32_t fw_id = p_fsm->cmn.ctx_id;
    struct sbuf_context *p_ctx = NULL;
    struct miscdevice *p_dev = NULL;

    p_ctx = &( sbuf_contexts[fw_id] );
    if ( p_ctx->fw_id != fw_id ) {
        LOG( LOG_CRIT, "Error: ctx_id mismatch, fsm fw_id: %d, ctx_id: %d.", fw_id, p_ctx->fw_id );
        return;
    }

    sbuf_mgr_free( &p_ctx->sbuf_mgr );

    p_dev = &p_ctx->sbuf_dev;
    if ( !p_dev->name ) {
        LOG( LOG_CRIT, "skip sbuf[%d] deregister due to NULL name", fw_id );
        return;
    }

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
    misc_deregister( p_dev );
#else
    rc = misc_deregister( p_dev );
    if ( rc ) {
        LOG( LOG_ERR, "deregister sbuf dev '%s' failed, ret: %d.", p_dev->name, rc );
    } else {
        LOG( LOG_INFO, "deregister sbuf dev '%s' ok.", p_dev->name );
    }
#endif
}
