// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Clovertrail PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * This file implements the loadable acceleration firmware API,
 * including the ioctls used to map and unmap acceleration parameters
 * and buffers.
 */

#include <linux/init.h>
#include <media/v4l2-event.h>

#include "hmm.h"

#include "atomisp_acc.h"
#include "atomisp_internal.h"
#include "atomisp_compat.h"
#include "atomisp_cmd.h"

#include "ia_css.h"

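/* Table mapping the ATOMISP_ACC_FW_LOAD_FL_* pipeline flags to CSS pipe IDs. */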
static const struct {
	unsigned int flag;
	enum ia_css_pipe_id pipe_id;
} acc_flag_to_pipe[] = {
	{ ATOMISP_ACC_FW_LOAD_FL_PREVIEW, IA_CSS_PIPE_ID_PREVIEW },
	{ ATOMISP_ACC_FW_LOAD_FL_COPY, IA_CSS_PIPE_ID_COPY },
	{ ATOMISP_ACC_FW_LOAD_FL_VIDEO, IA_CSS_PIPE_ID_VIDEO },
	{ ATOMISP_ACC_FW_LOAD_FL_CAPTURE, IA_CSS_PIPE_ID_CAPTURE },
	{ ATOMISP_ACC_FW_LOAD_FL_ACC, IA_CSS_PIPE_ID_ACC }
};

/*
 * Allocate struct atomisp_acc_fw along with space for firmware.
 * The returned struct atomisp_acc_fw is cleared (firmware region is not).
 */
static struct atomisp_acc_fw *acc_alloc_fw(unsigned int fw_size)
{
	struct atomisp_acc_fw *acc_fw;

	acc_fw = kzalloc(sizeof(*acc_fw), GFP_KERNEL);
	if (!acc_fw)
		return NULL;

	acc_fw->fw = vmalloc(fw_size);
	if (!acc_fw->fw) {
		kfree(acc_fw);
		return NULL;
	}

	return acc_fw;
}

static void acc_free_fw(struct atomisp_acc_fw *acc_fw)
{
	vfree(acc_fw->fw);
	kfree(acc_fw);
}

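/* Look up a loaded acceleration binary by its handle, or return NULL. */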
static struct atomisp_acc_fw *
acc_get_fw(struct atomisp_sub_device *asd, unsigned int handle)
{
	struct atomisp_acc_fw *acc_fw;

	list_for_each_entry(acc_fw, &asd->acc.fw, list)
		if (acc_fw->handle == handle)
			return acc_fw;

	return NULL;
}

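/* Find a mapped memory block matching the given CSS address and length. */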
static struct atomisp_map *acc_get_map(struct atomisp_sub_device *asd,
				       unsigned long css_ptr, size_t length)
{
	struct atomisp_map *atomisp_map;

	list_for_each_entry(atomisp_map, &asd->acc.memory_maps, list) {
		if (atomisp_map->ptr == css_ptr &&
		    atomisp_map->length == length)
			return atomisp_map;
	}
	return NULL;
}

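/* Stop the acceleration pipe, destroy it and return the stop result. */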
static int acc_stop_acceleration(struct atomisp_sub_device *asd)
{
	int ret;

	ret = atomisp_css_stop_acc_pipe(asd);
	atomisp_css_destroy_acc_pipe(asd);

	return ret;
}

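/* Release the per-subdevice handle IDAs on driver teardown. */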
void atomisp_acc_cleanup(struct atomisp_device *isp)
{
	int i;

	for (i = 0; i < isp->num_of_streams; i++)
		ida_destroy(&isp->asd[i].acc.ida);
}

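/*
 * Release all acceleration state of a sub-device: stop a running
 * acceleration pipeline, unload every loaded binary and free every
 * mapped memory block.
 */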
void atomisp_acc_release(struct atomisp_sub_device *asd)
{
	struct atomisp_acc_fw *acc_fw, *ta;
	struct atomisp_map *atomisp_map, *tm;

	/* Stop acceleration if already running */
	if (asd->acc.pipeline)
		acc_stop_acceleration(asd);

	/* Unload all loaded acceleration binaries */
	list_for_each_entry_safe(acc_fw, ta, &asd->acc.fw, list) {
		list_del(&acc_fw->list);
		ida_free(&asd->acc.ida, acc_fw->handle);
		acc_free_fw(acc_fw);
	}

	/* Free all mapped memory blocks */
	list_for_each_entry_safe(atomisp_map, tm, &asd->acc.memory_maps, list) {
		list_del(&atomisp_map->list);
		hmm_free(atomisp_map->ptr);
		kfree(atomisp_map);
	}
}

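/*
 * Load an acceleration firmware binary from userspace and attach it to
 * the pipelines selected in user_fw->flags. On success the allocated
 * handle is returned through user_fw->fw_handle.
 */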
int atomisp_acc_load_to_pipe(struct atomisp_sub_device *asd,
			     struct atomisp_acc_fw_load_to_pipe *user_fw)
{
	static const unsigned int pipeline_flags =
	    ATOMISP_ACC_FW_LOAD_FL_PREVIEW | ATOMISP_ACC_FW_LOAD_FL_COPY |
	    ATOMISP_ACC_FW_LOAD_FL_VIDEO |
	    ATOMISP_ACC_FW_LOAD_FL_CAPTURE | ATOMISP_ACC_FW_LOAD_FL_ACC;

	struct atomisp_acc_fw *acc_fw;
	int handle;

	if (!user_fw->data || user_fw->size < sizeof(*acc_fw->fw))
		return -EINVAL;

	/* The binary has to be enabled for at least one pipeline */
	if (!(user_fw->flags & pipeline_flags))
		return -EINVAL;

	/* We do not support other flags yet */
	if (user_fw->flags & ~pipeline_flags)
		return -EINVAL;

	if (user_fw->type < ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT ||
	    user_fw->type > ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
		return -EINVAL;

	if (asd->acc.pipeline || asd->acc.extension_mode)
		return -EBUSY;

	acc_fw = acc_alloc_fw(user_fw->size);
	if (!acc_fw)
		return -ENOMEM;

	if (copy_from_user(acc_fw->fw, user_fw->data, user_fw->size)) {
		acc_free_fw(acc_fw);
		return -EFAULT;
	}

	handle = ida_alloc(&asd->acc.ida, GFP_KERNEL);
	if (handle < 0) {
		acc_free_fw(acc_fw);
		return -ENOSPC;
	}

	user_fw->fw_handle = handle;
	acc_fw->handle = handle;
	acc_fw->flags = user_fw->flags;
	acc_fw->type = user_fw->type;
	acc_fw->fw->handle = handle;

	/*
	 * Correct the ISP firmware type so that the firmware gets appended
	 * to the correct pipe.
	 */
	if (acc_fw->fw->type == ia_css_isp_firmware) {
		static const int type_to_css[] = {
			[ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT] =
			    IA_CSS_ACC_OUTPUT,
			[ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER] =
			    IA_CSS_ACC_VIEWFINDER,
			[ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE] =
			    IA_CSS_ACC_STANDALONE,
		};
		acc_fw->fw->info.isp.type = type_to_css[acc_fw->type];
	}

	list_add_tail(&acc_fw->list, &asd->acc.fw);
	return 0;
}

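/*
 * Load a standalone acceleration binary; a thin wrapper around
 * atomisp_acc_load_to_pipe() that targets the ACC pipeline.
 */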
int atomisp_acc_load(struct atomisp_sub_device *asd,
		     struct atomisp_acc_fw_load *user_fw)
{
	struct atomisp_acc_fw_load_to_pipe ltp = {0};
	int r;

	ltp.flags = ATOMISP_ACC_FW_LOAD_FL_ACC;
	ltp.type = ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE;
	ltp.size = user_fw->size;
	ltp.data = user_fw->data;
	r = atomisp_acc_load_to_pipe(asd, &ltp);
	user_fw->fw_handle = ltp.fw_handle;
	return r;
}

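/* Unload a previously loaded acceleration binary identified by *handle. */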
int atomisp_acc_unload(struct atomisp_sub_device *asd, unsigned int *handle)
{
	struct atomisp_acc_fw *acc_fw;

	if (asd->acc.pipeline || asd->acc.extension_mode)
		return -EBUSY;

	acc_fw = acc_get_fw(asd, *handle);
	if (!acc_fw)
		return -EINVAL;

	list_del(&acc_fw->list);
	ida_free(&asd->acc.ida, acc_fw->handle);
	acc_free_fw(acc_fw);

	return 0;
}

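/*
 * Start a standalone acceleration pipeline. If *handle is non-zero only
 * that binary is run, otherwise all loaded standalone binaries are added
 * to the pipeline.
 */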
int atomisp_acc_start(struct atomisp_sub_device *asd, unsigned int *handle)
{
	struct atomisp_device *isp = asd->isp;
	struct atomisp_acc_fw *acc_fw;
	int ret;
	unsigned int nbin;

	if (asd->acc.pipeline || asd->acc.extension_mode)
		return -EBUSY;

	/* Invalidate caches. FIXME: should flush only necessary buffers */
	wbinvd();

	ret = atomisp_css_create_acc_pipe(asd);
	if (ret)
		return ret;

	nbin = 0;
	list_for_each_entry(acc_fw, &asd->acc.fw, list) {
		if (*handle != 0 && *handle != acc_fw->handle)
			continue;

		if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_STANDALONE)
			continue;

		/* Add the binary into the pipeline */
		ret = atomisp_css_load_acc_binary(asd, acc_fw->fw, nbin);
		if (ret < 0) {
			dev_err(isp->dev, "acc_load_binary failed\n");
			goto err_stage;
		}

		ret = atomisp_css_set_acc_parameters(acc_fw);
		if (ret < 0) {
			dev_err(isp->dev, "acc_set_parameters failed\n");
			goto err_stage;
		}
		nbin++;
	}
	if (nbin < 1) {
		/* Refuse to create a pipeline with no binaries */
		dev_err(isp->dev, "%s: no acc binary available\n", __func__);
		ret = -EINVAL;
		goto err_stage;
	}

	ret = atomisp_css_start_acc_pipe(asd);
	if (ret) {
		dev_err(isp->dev, "%s: atomisp_css_start_acc_pipe failed\n",
			__func__);
		goto err_stage;
	}

	return 0;

err_stage:
	atomisp_css_destroy_acc_pipe(asd);
	return ret;
}

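/*
 * Wait for a started acceleration pipeline to finish, then stop it.
 * The ISP is reset if stopping the pipeline fails with -EIO.
 */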
int atomisp_acc_wait(struct atomisp_sub_device *asd, unsigned int *handle)
{
	struct atomisp_device *isp = asd->isp;
	int ret;

	if (!asd->acc.pipeline)
		return -ENOENT;

	if (*handle && !acc_get_fw(asd, *handle))
		return -EINVAL;

	ret = atomisp_css_wait_acc_finish(asd);
	if (acc_stop_acceleration(asd) == -EIO) {
		atomisp_reset(isp);
		return -EINVAL;
	}

	return ret;
}

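/* Queue a V4L2 event to notify userspace that an acceleration stage finished. */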
void atomisp_acc_done(struct atomisp_sub_device *asd, unsigned int handle)
{
	struct v4l2_event event = { 0 };

	event.type = V4L2_EVENT_ATOMISP_ACC_COMPLETE;
	event.u.frame_sync.frame_sequence = atomic_read(&asd->sequence);
	event.id = handle;

	v4l2_event_queue(asd->subdev.devnode, &event);
}

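/*
 * Map a buffer into the CSS address space. A userspace buffer is mapped
 * in place (it must be page-aligned); otherwise a private buffer of the
 * requested length is allocated. The CSS address is returned through
 * map->css_ptr.
 */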
int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
{
	struct atomisp_map *atomisp_map;
	ia_css_ptr cssptr;
	int pgnr;

	if (map->css_ptr)
		return -EINVAL;

	if (asd->acc.pipeline)
		return -EBUSY;

	if (map->user_ptr) {
		/* Buffer to map must be page-aligned */
		if ((unsigned long)map->user_ptr & ~PAGE_MASK) {
			dev_err(asd->isp->dev,
				"%s: mapped buffer address %p is not page aligned\n",
				__func__, map->user_ptr);
			return -EINVAL;
		}

		pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE);
		if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
			dev_err(asd->isp->dev,
				"user space memory size is less than the expected size\n");
			return -ENOMEM;
		} else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
			dev_err(asd->isp->dev,
				"user space memory size is larger than the expected size\n");
			return -ENOMEM;
		}

		cssptr = hmm_alloc(map->length, HMM_BO_USER, 0, map->user_ptr,
				   map->flags & ATOMISP_MAP_FLAG_CACHED);
	} else {
		/* Allocate private buffer. */
		cssptr = hmm_alloc(map->length, HMM_BO_PRIVATE, 0, NULL,
				   map->flags & ATOMISP_MAP_FLAG_CACHED);
	}

	if (!cssptr)
		return -ENOMEM;

	atomisp_map = kmalloc(sizeof(*atomisp_map), GFP_KERNEL);
	if (!atomisp_map) {
		hmm_free(cssptr);
		return -ENOMEM;
	}
	atomisp_map->ptr = cssptr;
	atomisp_map->length = map->length;
	list_add(&atomisp_map->list, &asd->acc.memory_maps);

	dev_dbg(asd->isp->dev, "%s: userptr %p, css_address 0x%x, size %d\n",
		__func__, map->user_ptr, cssptr, map->length);
	map->css_ptr = cssptr;
	return 0;
}

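/* Unmap and free a buffer previously mapped with atomisp_acc_map(). */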
int atomisp_acc_unmap(struct atomisp_sub_device *asd,
		      struct atomisp_acc_map *map)
{
	struct atomisp_map *atomisp_map;

	if (asd->acc.pipeline)
		return -EBUSY;

	atomisp_map = acc_get_map(asd, map->css_ptr, map->length);
	if (!atomisp_map)
		return -EINVAL;

	list_del(&atomisp_map->list);
	hmm_free(atomisp_map->ptr);
	kfree(atomisp_map);
	return 0;
}

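/*
 * Set a previously mapped buffer as an argument for one of the memory
 * slots of a loaded acceleration binary. Passing css_ptr == 0 and
 * length == 0 clears the argument.
 */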
int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd,
			     struct atomisp_acc_s_mapped_arg *arg)
{
	struct atomisp_acc_fw *acc_fw;

	if (arg->memory >= ATOMISP_ACC_NR_MEMORY)
		return -EINVAL;

	if (asd->acc.pipeline)
		return -EBUSY;

	acc_fw = acc_get_fw(asd, arg->fw_handle);
	if (!acc_fw)
		return -EINVAL;

	if (arg->css_ptr != 0 || arg->length != 0) {
		/* Unless the parameter is cleared, check that it exists */
		if (!acc_get_map(asd, arg->css_ptr, arg->length))
			return -EINVAL;
	}

	acc_fw->args[arg->memory].length = arg->length;
	acc_fw->args[arg->memory].css_ptr = arg->css_ptr;

	dev_dbg(asd->isp->dev, "%s: mem %d, address %p, size %ld\n",
		__func__, arg->memory, (void *)arg->css_ptr,
		(unsigned long)arg->length);
	return 0;
}

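/*
 * Unload the extensions of one binary from the pipes with index below i;
 * used to unwind a partially completed atomisp_acc_load_extensions().
 */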
static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd,
						int i,
						struct atomisp_acc_fw *acc_fw)
{
	while (--i >= 0) {
		if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
			atomisp_css_unload_acc_extension(asd, acc_fw->fw,
							 acc_flag_to_pipe[i].pipe_id);
		}
	}
}

/*
 * Appends the loaded acceleration binary extensions to the
 * current ISP mode. Must be called just before sh_css_start().
 */
int atomisp_acc_load_extensions(struct atomisp_sub_device *asd)
{
	struct atomisp_acc_fw *acc_fw;
	bool ext_loaded = false;
	bool continuous = asd->continuous_mode->val &&
			  asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW;
	int ret = 0, i = -1;
	struct atomisp_device *isp = asd->isp;

	if (asd->acc.pipeline || asd->acc.extension_mode)
		return -EBUSY;

	/* Invalidate caches. FIXME: should flush only necessary buffers */
	wbinvd();

	list_for_each_entry(acc_fw, &asd->acc.fw, list) {
		if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
		    acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
			continue;

		for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
			/*
			 * QoS (ACC pipe) acceleration stages are currently
			 * allowed only in continuous mode. Skip them for
			 * all other modes.
			 */
			if (!continuous &&
			    acc_flag_to_pipe[i].flag ==
			    ATOMISP_ACC_FW_LOAD_FL_ACC)
				continue;

			if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
				ret = atomisp_css_load_acc_extension(asd,
								     acc_fw->fw,
								     acc_flag_to_pipe[i].pipe_id,
								     acc_fw->type);
				if (ret) {
					atomisp_acc_unload_some_extensions(asd, i, acc_fw);
					goto error;
				}

				ext_loaded = true;
			}
		}

		ret = atomisp_css_set_acc_parameters(acc_fw);
		if (ret < 0) {
			atomisp_acc_unload_some_extensions(asd, i, acc_fw);
			goto error;
		}
	}

	if (!ext_loaded)
		return ret;

	ret = atomisp_css_update_stream(asd);
	if (ret) {
		dev_err(isp->dev, "%s: update stream failed.\n", __func__);
		atomisp_acc_unload_extensions(asd);
		goto error;
	}

	asd->acc.extension_mode = true;
	return 0;

error:
	list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) {
		if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
		    acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
			continue;

		for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
			if (!continuous &&
			    acc_flag_to_pipe[i].flag ==
			    ATOMISP_ACC_FW_LOAD_FL_ACC)
				continue;
			if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
				atomisp_css_unload_acc_extension(asd,
								 acc_fw->fw,
								 acc_flag_to_pipe[i].pipe_id);
			}
		}
	}
	return ret;
}

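/* Unload all acceleration extensions loaded by atomisp_acc_load_extensions(). */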
void atomisp_acc_unload_extensions(struct atomisp_sub_device *asd)
{
	struct atomisp_acc_fw *acc_fw;
	int i;

	if (!asd->acc.extension_mode)
		return;

	list_for_each_entry_reverse(acc_fw, &asd->acc.fw, list) {
		if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT &&
		    acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER)
			continue;

		for (i = ARRAY_SIZE(acc_flag_to_pipe) - 1; i >= 0; i--) {
			if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
				atomisp_css_unload_acc_extension(asd,
								 acc_fw->fw,
								 acc_flag_to_pipe[i].pipe_id);
			}
		}
	}

	asd->acc.extension_mode = false;
}

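/*
 * Enable or disable a loaded QoS extension on every pipe it is attached
 * to, according to ATOMISP_STATE_FLAG_ENABLE in arg->flags.
 */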
int atomisp_acc_set_state(struct atomisp_sub_device *asd,
			  struct atomisp_acc_state *arg)
{
	struct atomisp_acc_fw *acc_fw;
	bool enable = (arg->flags & ATOMISP_STATE_FLAG_ENABLE) != 0;
	struct ia_css_pipe *pipe;
	int r;
	int i;

	if (!asd->acc.extension_mode)
		return -EBUSY;

	if (arg->flags & ~ATOMISP_STATE_FLAG_ENABLE)
		return -EINVAL;

	acc_fw = acc_get_fw(asd, arg->fw_handle);
	if (!acc_fw)
		return -EINVAL;

	if (enable)
		wbinvd();

	for (i = 0; i < ARRAY_SIZE(acc_flag_to_pipe); i++) {
		if (acc_fw->flags & acc_flag_to_pipe[i].flag) {
			pipe = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
			       pipes[acc_flag_to_pipe[i].pipe_id];
			r = ia_css_pipe_set_qos_ext_state(pipe, acc_fw->handle,
							  enable);
			if (r)
				return -EBADRQC;
		}
	}

	if (enable)
		acc_fw->flags |= ATOMISP_ACC_FW_LOAD_FL_ENABLE;
	else
		acc_fw->flags &= ~ATOMISP_ACC_FW_LOAD_FL_ENABLE;

	return 0;
}

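/* Report the current flags (including the enable state) of a loaded binary. */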
int atomisp_acc_get_state(struct atomisp_sub_device *asd,
			  struct atomisp_acc_state *arg)
{
	struct atomisp_acc_fw *acc_fw;

	if (!asd->acc.extension_mode)
		return -EBUSY;

	acc_fw = acc_get_fw(asd, arg->fw_handle);
	if (!acc_fw)
		return -EINVAL;

	arg->flags = acc_fw->flags;

	return 0;
}