1 /*
2 * Copyright (C) 2022 Huawei Technologies Co., Ltd.
 * Description: exported funcs for SPI interrupt actions.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14 #include "tz_spi_notify.h"
15 #include <linux/slab.h>
16 #include <linux/fs.h>
17 #include <linux/uaccess.h>
18 #include <linux/sched.h>
19 #include <linux/list.h>
20 #include <linux/mutex.h>
21 #include <asm/cacheflush.h>
22 #include <linux/kthread.h>
23 #include <linux/freezer.h>
24 #include <linux/module.h>
25 #include <linux/of.h>
26 #include <linux/of_address.h>
27 #include <linux/of_device.h>
28 #include <linux/of_platform.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_reserved_mem.h>
31 #include <linux/atomic.h>
32 #include <linux/interrupt.h>
33 #include <securec.h>
34 #include "teek_client_constants.h"
35 #include "tc_ns_client.h"
36 #include "tc_ns_log.h"
37 #include "tc_client_driver.h"
38 #include "gp_ops.h"
39 #include "mailbox_mempool.h"
40 #include "smc_smp.h"
41 #include "session_manager.h"
42 #include "internal_functions.h"
43 #include "shared_mem.h"
44
45 #define DEFAULT_SPI_NUM 111
46
47 #define MAX_CALLBACK_COUNT 100
48 #define UUID_SIZE 16
/* forward declaration so notify_context_timer can embed the property */
struct teec_timer_property;

enum timer_class_type {
	/* timer event using timer10 */
	TIMER_GENERIC,
	/* timer event using RTC */
	TIMER_RTC
};

/* timer attributes passed to a registered TA callback */
struct teec_timer_property {
	unsigned int type;
	unsigned int timer_id;
	unsigned int timer_class;	/* one of enum timer_class_type */
	unsigned int reserved2;
};

/* payload of a TIMER/RTC notification: identifies the device file,
 * TA uuid and session the expired timer belongs to */
struct notify_context_timer {
	unsigned int dev_file_id;
	unsigned char uuid[UUID_SIZE];
	unsigned int session_id;
	struct teec_timer_property property;
	uint32_t expire_time;
};
72
73
/* payload of a WAKEUP/SHADOW_EXIT notification: the CA thread to act on */
struct notify_context_wakeup {
	pid_t ca_thread_id;
};

/* payload of a SHADOW/FIQSHD notification: the TEE tcb to service */
struct notify_context_shadow {
	uint64_t target_tcb;
};

#ifdef CONFIG_TA_AFFINITY

/* width in bits of one affinity word */
#define AFF_BITS_SIZE 64

/* number of 64-bit words needed to cover CONFIG_TA_AFFINITY_CPU_NUMS cpus */
#define AFF_BITS_NUM ((CONFIG_TA_AFFINITY_CPU_NUMS % AFF_BITS_SIZE == 0) ? \
	(CONFIG_TA_AFFINITY_CPU_NUMS / AFF_BITS_SIZE) : \
	(CONFIG_TA_AFFINITY_CPU_NUMS / AFF_BITS_SIZE + 1))

/* bit mask selecting cpuid's position within its affinity word */
#define aff_bits_mask(cpuid) \
	(1LLU << (cpuid - (cpuid / AFF_BITS_SIZE) * AFF_BITS_SIZE))

struct aff_bits_t {
	uint64_t aff_bits[AFF_BITS_NUM];
};

/* payload of a SET_AFFINITY notification: cpu mask requested for a CA thread */
struct notify_context_set_affinity {
	pid_t ca_thread_id;
	struct aff_bits_t aff;
};

#endif
103
/*
 * Counters kept in the meta slot of the shared notify page.
 * recv_* are incremented locally in get_notify_data_entry; send_* are
 * presumably updated by the TEE side -- not visible here, confirm.
 */
struct notify_context_stats {
	uint32_t send_s;
	uint32_t recv_s;	/* shadow/fiqshd/shadow-exit entries consumed */
	uint32_t send_w;
	uint32_t recv_w;	/* wakeup entries consumed */
#ifdef CONFIG_TA_AFFINITY
	uint32_t send_af;
	uint32_t recv_af;	/* set-affinity entries consumed */
#endif
	/* bitmask keyed by enum notify_data_type (see
	 * spi_broadcast_notifications); fetched-and-cleared via __xchg */
	uint32_t missed;
};

/* one payload variant per notification type */
union notify_context {
	struct notify_context_timer timer;
	struct notify_context_wakeup wakeup;
	struct notify_context_shadow shadow;
#ifdef CONFIG_TA_AFFINITY
	struct notify_context_set_affinity affinity;
#endif
	struct notify_context_stats meta;
};
/*
 * One slot of the shared notify page. `filled` is the hand-off flag:
 * set by the producer, cleared by the consumer after copying the slot.
 * The big-endian variant reorders the bitfields (and adds padding) so
 * the in-memory layout matches the TEE side -- confirm against TEE ABI.
 */
#ifndef CONFIG_BIG_ENDIAN
struct notify_data_entry {
	uint32_t entry_type : 31;	/* enum notify_data_type */
	uint32_t filled : 1;
	union notify_context context;
};
#else
struct notify_data_entry {
	uint32_t resv;
	uint32_t filled : 1;
	uint32_t entry_type : 31;	/* enum notify_data_type */
	union notify_context context;
};
#endif
139
#ifdef CONFIG_BIG_SESSION

/* usable slots in the shared region; one slot is reserved for `meta` */
#define NOTIFY_DATA_ENTRY_COUNT \
	(((PAGE_SIZE * ((1U) << (CONFIG_NOTIFY_PAGE_ORDER))) \
	/ sizeof(struct notify_data_entry)) - 1)
#else
#define NOTIFY_DATA_ENTRY_COUNT \
	((PAGE_SIZE / sizeof(struct notify_data_entry)) - 1)
#endif

/* layout of the memory region shared with TEE for notifications */
struct notify_data_struct {
	struct notify_data_entry entry[NOTIFY_DATA_ENTRY_COUNT];
	struct notify_data_entry meta;	/* stats + missed bitmask */
};

/* shared notify region; mapped and registered with TEE in tz_spi_init */
static struct notify_data_struct *g_notify_data;
/* cached pointer to the shadow entry, set once in tz_spi_init */
static struct notify_data_entry *g_notify_data_entry_shadow;
/* serializes the slot scan in get_notify_data_entry */
static spinlock_t g_notify_lock;
158
/* discriminator stored in notify_data_entry.entry_type */
enum notify_data_type {
	NOTIFY_DATA_ENTRY_UNUSED,
	NOTIFY_DATA_ENTRY_TIMER,
	NOTIFY_DATA_ENTRY_RTC,
	NOTIFY_DATA_ENTRY_WAKEUP,
	NOTIFY_DATA_ENTRY_SHADOW,
	NOTIFY_DATA_ENTRY_FIQSHD,
	NOTIFY_DATA_ENTRY_SHADOW_EXIT,
#ifdef CONFIG_TA_AFFINITY
	NOTIFY_DATA_ENTRY_SET_AFFINITY,
#endif
	NOTIFY_DATA_ENTRY_MAX,
};

/* one registered TA callback, keyed by TA uuid */
struct tc_ns_callback {
	unsigned char uuid[UUID_SIZE];
	struct mutex callback_lock;
	void (*callback_func)(void *);
	struct list_head head;
};

/* list of all registered TA callbacks plus its guard lock and count */
struct tc_ns_callback_list {
	unsigned int callback_count;
	struct mutex callback_list_lock;
	struct list_head callback_list;
};
185
/* forward declaration for the notify work handler */
static void tc_notify_fn(struct work_struct *dummy);
/* registered TA callbacks, keyed by uuid */
static struct tc_ns_callback_list g_ta_callback_func_list;
/* NOTE(review): tc_notify_work appears unused in this file --
 * tc_secure_notify uses its own static work pool; confirm before removal */
static DECLARE_WORK(tc_notify_work, tc_notify_fn);
/* dedicated high-priority workqueue serving SPI notifications */
static struct workqueue_struct *g_tz_spi_wq;
190
walk_callback_list(struct notify_context_timer * tc_notify_data_timer)191 static void walk_callback_list(
192 struct notify_context_timer *tc_notify_data_timer)
193 {
194 struct tc_ns_callback *callback_func_t = NULL;
195
196 mutex_lock(&g_ta_callback_func_list.callback_list_lock);
197 list_for_each_entry(callback_func_t,
198 &g_ta_callback_func_list.callback_list, head) {
199 if (memcmp(callback_func_t->uuid, tc_notify_data_timer->uuid,
200 UUID_SIZE) != 0)
201 continue;
202
203 if (tc_notify_data_timer->property.timer_class ==
204 TIMER_RTC) {
205 tlogd("start to call callback func\n");
206 callback_func_t->callback_func(
207 &(tc_notify_data_timer->property));
208 tlogd("end to call callback func\n");
209 } else if (tc_notify_data_timer->property.timer_class ==
210 TIMER_GENERIC) {
211 tlogd("timer60 no callback func\n");
212 }
213 }
214 mutex_unlock(&g_ta_callback_func_list.callback_list_lock);
215 }
216
/*
 * Locate the session targeted by a timer notification.
 *
 * Walks the global device-file list for the dev_file_id carried in the
 * notification, then looks up the service by uuid and the session by
 * session_id within that device file. On a match, *temp_ses holds a
 * referenced session (caller must put_session_struct it) and *enc_found
 * is set to true.
 *
 * Returns -ENOENT if the device list is unavailable, 0 otherwise --
 * including when no matching session was found (check *enc_found).
 */
static int find_notify_sess(
	const struct notify_context_timer *tc_notify_data_timer,
	struct tc_ns_session **temp_ses, bool *enc_found)
{
	struct tc_ns_dev_file *temp_dev_file = NULL;
	struct tc_ns_dev_list *dev_list = NULL;
	struct tc_ns_service *temp_svc = NULL;

	dev_list = get_dev_list();
	if (!dev_list) {
		tloge("dev list is invalid\n");
		return -ENOENT;
	}

	mutex_lock(&dev_list->dev_lock);
	list_for_each_entry(temp_dev_file, &dev_list->dev_file_list, head) {
		tlogd("dev file id1 = %u, id2 = %u\n",
			temp_dev_file->dev_file_id,
			tc_notify_data_timer->dev_file_id);
		if (temp_dev_file->dev_file_id ==
			tc_notify_data_timer->dev_file_id) {
			/* take a service reference under service_lock,
			 * then drop the lock before touching the session list */
			mutex_lock(&temp_dev_file->service_lock);
			temp_svc =
				tc_find_service_in_dev(temp_dev_file,
					tc_notify_data_timer->uuid, UUID_LEN);
			get_service_struct(temp_svc);
			mutex_unlock(&temp_dev_file->service_lock);
			if (!temp_svc)
				break;
			/* session lookup + ref under session_lock */
			mutex_lock(&temp_svc->session_lock);
			*temp_ses =
				tc_find_session_withowner(
					&temp_svc->session_list,
					tc_notify_data_timer->session_id,
					temp_dev_file);
			get_session_struct(*temp_ses);
			mutex_unlock(&temp_svc->session_lock);
			/* service ref no longer needed; session holds its own */
			put_service_struct(temp_svc);
			temp_svc = NULL;
			if (*temp_ses) {
				tlogd("send cmd ses id %u\n",
					(*temp_ses)->session_id);
				*enc_found = true;
				break;
			}
			break;
		}
	}
	mutex_unlock(&dev_list->dev_lock);

	return 0;
}
269
tc_notify_timer_fn(struct notify_data_entry * notify_data_entry)270 static void tc_notify_timer_fn(struct notify_data_entry *notify_data_entry)
271 {
272 struct tc_ns_session *temp_ses = NULL;
273 bool enc_found = false;
274 struct notify_context_timer *tc_notify_data_timer = NULL;
275
276 tc_notify_data_timer = &(notify_data_entry->context.timer);
277 notify_data_entry->filled = 0;
278 tlogd("notify data timer type is 0x%x, timer ID is 0x%x\n",
279 tc_notify_data_timer->property.type,
280 tc_notify_data_timer->property.timer_id);
281 walk_callback_list(tc_notify_data_timer);
282
283 if (find_notify_sess(tc_notify_data_timer, &temp_ses, &enc_found) != 0)
284 return;
285
286 if (tc_notify_data_timer->property.timer_class == TIMER_GENERIC) {
287 tlogd("timer60 wake up event\n");
288 if (enc_found && temp_ses) {
289 temp_ses->wait_data.send_wait_flag = 1;
290 wake_up(&temp_ses->wait_data.send_cmd_wq);
291 put_session_struct(temp_ses);
292 temp_ses = NULL;
293 }
294 } else {
295 tlogd("RTC do not need to wakeup\n");
296 }
297 }
298
/*
 * Scan the shared notify page for the first filled entry, copy it into
 * *copy, and release the slot (filled = 0). Per-type receive counters
 * in the meta entry are bumped for shadow/wakeup/affinity entries.
 *
 * Returns 0 when an entry was copied out, -1 on bad parameters, when no
 * entry is pending, or when an entry of invalid type is encountered.
 *
 * NOTE(review): the in-loop comment claims TIMER/RTC use fixed entries
 * and are skipped, yet the scan starts at index 0 and the TIMER/RTC
 * switch cases fall through to the copy -- confirm the intended range.
 */
static noinline int get_notify_data_entry(struct notify_data_entry *copy)
{
	uint32_t i;
	int filled;
	int ret = -1;

	if (!copy || !g_notify_data) {
		tloge("bad parameters or notify data is NULL");
		return ret;
	}

	spin_lock(&g_notify_lock);
	/* TIMER and RTC use fix entry, skip them. */
	for (i = NOTIFY_DATA_ENTRY_UNUSED; i < NOTIFY_DATA_ENTRY_COUNT; i++) {
		struct notify_data_entry *e = &g_notify_data->entry[i];
		filled = e->filled;
		/* order the `filled` read before reading the payload */
		smp_mb();
		if (filled == 0)
			continue;
		switch (e->entry_type) {
		case NOTIFY_DATA_ENTRY_TIMER:
		case NOTIFY_DATA_ENTRY_RTC:
			break;
		case NOTIFY_DATA_ENTRY_SHADOW:
		case NOTIFY_DATA_ENTRY_SHADOW_EXIT:
		case NOTIFY_DATA_ENTRY_FIQSHD:
			g_notify_data->meta.context.meta.recv_s++;
			break;
		case NOTIFY_DATA_ENTRY_WAKEUP:
			g_notify_data->meta.context.meta.recv_w++;
			break;
#ifdef CONFIG_TA_AFFINITY
		case NOTIFY_DATA_ENTRY_SET_AFFINITY:
			g_notify_data->meta.context.meta.recv_af++;
			break;
#endif
		default:
			tloge("invalid notify type=%u\n", e->entry_type);
			goto exit;
		}
		if (memcpy_s(copy, sizeof(*copy), e, sizeof(*e)) != EOK) {
			tloge("memcpy entry failed\n");
			break;
		}
		/* ensure the copy is complete before releasing the slot */
		smp_mb();
		e->filled = 0;
		ret = 0;
		break;
	}
exit:
	spin_unlock(&g_notify_lock);
	return ret;
}
352
tc_notify_wakeup_fn(const struct notify_data_entry * entry)353 static void tc_notify_wakeup_fn(const struct notify_data_entry *entry)
354 {
355 const struct notify_context_wakeup *tc_notify_wakeup = NULL;
356
357 tc_notify_wakeup = &(entry->context.wakeup);
358 smc_wakeup_ca(tc_notify_wakeup->ca_thread_id);
359 tlogd("notify data entry wakeup ca: %d\n",
360 tc_notify_wakeup->ca_thread_id);
361 }
362
tc_notify_shadow_fn(const struct notify_data_entry * entry)363 static void tc_notify_shadow_fn(const struct notify_data_entry *entry)
364 {
365 const struct notify_context_shadow *tc_notify_shadow = NULL;
366
367 tc_notify_shadow = &(entry->context.shadow);
368 smc_queue_shadow_worker(tc_notify_shadow->target_tcb);
369 }
370
tc_notify_fiqshd_fn(const struct notify_data_entry * entry)371 static void tc_notify_fiqshd_fn(const struct notify_data_entry *entry)
372 {
373 const struct notify_context_shadow *tc_notify_shadow = NULL;
374
375 if (!entry) {
376 /* for NOTIFY_DATA_ENTRY_FIQSHD missed */
377 fiq_shadow_work_func(0);
378 return;
379 }
380 tc_notify_shadow = &(entry->context.shadow);
381 fiq_shadow_work_func(tc_notify_shadow->target_tcb);
382 }
383
tc_notify_shadowexit_fn(const struct notify_data_entry * entry)384 static void tc_notify_shadowexit_fn(const struct notify_data_entry *entry)
385 {
386 const struct notify_context_wakeup *tc_notify_wakeup = NULL;
387
388 tc_notify_wakeup = &(entry->context.wakeup);
389 if (smc_shadow_exit(tc_notify_wakeup->ca_thread_id) != 0)
390 tloge("shadow ca exit failed: %d\n",
391 (int)tc_notify_wakeup->ca_thread_id);
392 }
393
#ifdef CONFIG_TA_AFFINITY
/*
 * Handle a SET_AFFINITY notify entry from TEE: translate the TA's
 * affinity word array into a cpumask restricted to online cpus, record
 * it on the CA's pending entry, and wake the CA so it can apply the
 * mask in its own thread context.
 */
static void tc_notify_set_affinity(struct notify_data_entry *entry)
{
	struct notify_context_set_affinity *af_data = NULL;
	struct pending_entry *pe = NULL;

	af_data = &(entry->context.affinity);
	pe = find_pending_entry(af_data->ca_thread_id);
	if (pe != NULL) {
		struct cpumask mask;
		uint32_t i;

		/* build the requested mask, online cpus only */
		cpumask_clear(&mask);
		for_each_online_cpu(i) {
			struct aff_bits_t *aff = &af_data->aff;
			if (aff->aff_bits[i / AFF_BITS_SIZE] & aff_bits_mask(i))
				cpumask_set_cpu(i, &mask);
		}

		/*
		 * we don't set ca's cpumask here but in ca's own thread
		 * context after ca is wakeup in smc_send_func, or
		 * scheduler will set task's allow cpumask failure in that case.
		 */
		cpumask_copy(&pe->ta_mask, &mask);
		smc_wakeup_ca(af_data->ca_thread_id);
		tlogd("set affinity for ca thread id %u\n", af_data->ca_thread_id);
		put_pending_entry(pe);
	} else {
		tloge("invalid ca thread id %u for set affinity\n",
			af_data->ca_thread_id);
		/*
		 * if a TEE tcb without CA bind(CA is 0) cause a affinity set,
		 * the CA tid(current cpu context) may wrong
		 * (in tc_notify_fiqshd_fn, don't init_pending_entry,
		 * in this case, cannot find pending_entry),
		 * but we must set affinity for CA otherwise the TA can't run,
		 * so we wakeup all blocked CA.
		 */
		(void)smc_wakeup_broadcast();
	}
}
#endif
437
#define MISSED_COUNT 4
/*
 * Replay notifications the producer could not queue as entries.
 * The meta `missed` field is a bitmask keyed by enum notify_data_type;
 * it is atomically fetched-and-cleared, then the two recoverable kinds
 * (WAKEUP, FIQSHD) are replayed in broadcast/zero-target form since the
 * precise target was lost. Any remaining bits are only logged.
 */
static void spi_broadcast_notifications(void)
{
	uint32_t missed;

	smp_mb();

	if (!g_notify_data) {
		tloge("notify data is NULL\n");
		return;
	}

	/* atomic exchange with 0; MISSED_COUNT is presumably the operand
	 * size in bytes for __xchg -- TODO confirm per-arch signature */
	missed = (uint32_t)__xchg(0, &g_notify_data->meta.context.meta.missed, MISSED_COUNT);
	if (missed == 0)
		return;
	/* missed WAKEUP: target CA unknown, wake every blocked CA */
	if ((missed & (1U << NOTIFY_DATA_ENTRY_WAKEUP)) != 0) {
		smc_wakeup_broadcast();
		missed &= ~(1U << NOTIFY_DATA_ENTRY_WAKEUP);
	}
	/* missed FIQSHD: run shadow work with a zero target tcb */
	if ((missed & (1U << NOTIFY_DATA_ENTRY_FIQSHD)) != 0) {
		tc_notify_fiqshd_fn(NULL);
		missed &= ~(1U << NOTIFY_DATA_ENTRY_FIQSHD);
	}
	if (missed != 0)
		tloge("missed spi notification mask %x\n", missed);
}
464
tc_notify_fn(struct work_struct * dummy)465 static void tc_notify_fn(struct work_struct *dummy)
466 {
467 struct notify_data_entry copy = {0};
468 (void)dummy;
469
470 while (get_notify_data_entry(©) == 0) {
471 switch (copy.entry_type) {
472 case NOTIFY_DATA_ENTRY_TIMER:
473 case NOTIFY_DATA_ENTRY_RTC:
474 tc_notify_timer_fn(©);
475 break;
476 case NOTIFY_DATA_ENTRY_WAKEUP:
477 tc_notify_wakeup_fn(©);
478 break;
479 case NOTIFY_DATA_ENTRY_SHADOW:
480 tc_notify_shadow_fn(©);
481 break;
482 case NOTIFY_DATA_ENTRY_FIQSHD:
483 tc_notify_fiqshd_fn(©);
484 break;
485 case NOTIFY_DATA_ENTRY_SHADOW_EXIT:
486 tc_notify_shadowexit_fn(©);
487 break;
488 #ifdef CONFIG_TA_AFFINITY
489 case NOTIFY_DATA_ENTRY_SET_AFFINITY:
490 tc_notify_set_affinity(©);
491 break;
492 #endif
493 default:
494 tloge("invalid entry type = %u\n", copy.entry_type);
495 }
496 if (memset_s(©, sizeof(copy), 0, sizeof(copy)) != 0)
497 tloge("memset copy failed\n");
498 }
499 spi_broadcast_notifications();
500 }
501
/*
 * SPI interrupt handler raised by TEE: queue a notify work item so the
 * real processing runs in workqueue context. A small static pool of
 * work structs lets several back-to-back interrupts each queue an
 * independent work instance (queue_work returns false for a work item
 * that is already pending, so the loop finds a free one).
 *
 * NOTE(review): the lazy one-shot init of the pool has no locking --
 * presumed safe because the first interrupt on this line cannot race
 * itself, but confirm for multi-CPU IRQ affinity changes.
 */
static irqreturn_t tc_secure_notify(int irq, void *dev_id)
{
#define N_WORK 8
	int i;
	int queued = 0;
	static struct work_struct tc_notify_works[N_WORK];
	static int init;
	(void)dev_id;

	if (init == 0) {
		for (i = 0; i < N_WORK; i++)
			INIT_WORK(&tc_notify_works[i], tc_notify_fn);
		init = 1;
	}
	for (i = 0; i < N_WORK; i++) {
		if (queue_work(g_tz_spi_wq, &tc_notify_works[i])) {
			queued = 1;
			break;
		}
	}
	/* trace whether this interrupt got a work slot or was coalesced */
	if (queued == 1)
		tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_RESPONSE, (uint64_t)irq);
	else
		tee_trace_add_event(INTERRUPT_HANDLE_SPI_REE_MISS, (uint64_t)irq);
#undef N_WORK

	return IRQ_HANDLED;
}
530
tc_ns_register_service_call_back_func(const char * uuid,void * func,const void * private_data)531 int tc_ns_register_service_call_back_func(const char *uuid, void *func,
532 const void *private_data)
533 {
534 struct tc_ns_callback *callback_func = NULL;
535 struct tc_ns_callback *new_callback = NULL;
536 int ret = 0;
537
538 if (!uuid || !func)
539 return -EINVAL;
540
541 (void)private_data;
542 mutex_lock(&g_ta_callback_func_list.callback_list_lock);
543 if (g_ta_callback_func_list.callback_count > MAX_CALLBACK_COUNT) {
544 mutex_unlock(&g_ta_callback_func_list.callback_list_lock);
545 tloge("callback_count is out\n");
546 return -ENOMEM;
547 }
548 list_for_each_entry(callback_func,
549 &g_ta_callback_func_list.callback_list, head) {
550 if (memcmp(callback_func->uuid, uuid, UUID_SIZE) == 0) {
551 callback_func->callback_func = (void (*)(void *))func;
552 tlogd("succeed to find uuid ta_callback_func_list\n");
553 goto find_callback;
554 }
555 }
556 /* create a new callback struct if we couldn't find it in list */
557 new_callback = kzalloc(sizeof(*new_callback), GFP_KERNEL);
558 if (ZERO_OR_NULL_PTR((unsigned long)(uintptr_t)new_callback)) {
559 tloge("kzalloc failed\n");
560 ret = -ENOMEM;
561 goto find_callback;
562 }
563
564 if (memcpy_s(new_callback->uuid, UUID_SIZE, uuid, UUID_SIZE) != 0) {
565 kfree(new_callback);
566 new_callback = NULL;
567 ret = -ENOMEM;
568 goto find_callback;
569 }
570 g_ta_callback_func_list.callback_count++;
571 tlogd("callback count is %u\n",
572 g_ta_callback_func_list.callback_count);
573 INIT_LIST_HEAD(&new_callback->head);
574 new_callback->callback_func = (void (*)(void *))func;
575 mutex_init(&new_callback->callback_lock);
576 list_add_tail(&new_callback->head,
577 &g_ta_callback_func_list.callback_list);
578 find_callback:
579 mutex_unlock(&g_ta_callback_func_list.callback_list_lock);
580 return ret;
581 }
582
TC_NS_RegisterServiceCallbackFunc(const char * uuid,void * func,const void * private_data)583 int TC_NS_RegisterServiceCallbackFunc(const char *uuid, void *func,
584 const void *private_data)
585 {
586 const char *uuid_in = uuid;
587
588 if (!get_tz_init_flag()) return EFAULT;
589 return tc_ns_register_service_call_back_func(uuid_in,
590 func, private_data);
591 }
592 EXPORT_SYMBOL(TC_NS_RegisterServiceCallbackFunc);
593
/*
 * Send a global SMC command describing the shared notification page.
 *
 * cmd_id: global command id (e.g. GLOBAL_CMD_ID_REGISTER_NOTIFY_MEMORY).
 * The page's physical address is split into low/high 32-bit halves in
 * params[0].value.a/b and its size (SZ_4K) goes in params[1].value.a.
 *
 * Returns 0 on success, -ENOMEM if the mailbox allocation fails,
 * -EPERM if TEE rejects the command.
 */
int send_notify_cmd(unsigned int cmd_id)
{
	struct tc_ns_smc_cmd smc_cmd = { {0}, 0 };
	int ret = 0;
	struct mb_cmd_pack *mb_pack = NULL;

	mb_pack = mailbox_alloc_cmd_pack();
	if (!mb_pack)
		return -ENOMEM;

	mb_pack->operation.paramtypes =
		TEE_PARAM_TYPE_VALUE_INPUT |
		TEE_PARAM_TYPE_VALUE_INPUT << TEE_PARAM_NUM;
	/* low 32 bits of the notify page's physical address */
	mb_pack->operation.params[0].value.a =
		(unsigned int)(get_spi_mem_paddr((uintptr_t)g_notify_data));
	/* high 32 bits */
	mb_pack->operation.params[0].value.b =
		(unsigned int)(get_spi_mem_paddr((uintptr_t)g_notify_data) >> ADDR_TRANS_NUM);
	mb_pack->operation.params[1].value.a = SZ_4K;
	smc_cmd.cmd_type = CMD_TYPE_GLOBAL;
	smc_cmd.cmd_id = cmd_id;
	/* physical address of the operation block, split the same way */
	smc_cmd.operation_phys =
		(unsigned int)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation);
	smc_cmd.operation_h_phys =
		(unsigned int)((uint64_t)mailbox_virt_to_phys((uintptr_t)&mb_pack->operation) >> ADDR_TRANS_NUM);

	/* during TEE reboot the normal SMC path is unavailable */
	if (is_tee_rebooting())
		ret = send_smc_cmd_rebooting(TSP_REQUEST, 0, 0, &smc_cmd);
	else
		ret = tc_ns_smc(&smc_cmd);

	if (ret != 0) {
		ret = -EPERM;
		tloge("register notify mem failed\n");
	}

	mailbox_free(mb_pack);

	return ret;
}
633
/* SPI interrupt number; DEFAULT_SPI_NUM unless overridden by ACPI/DT */
static unsigned int g_irq = DEFAULT_SPI_NUM;
/*
 * Resolve the notify IRQ from ACPI or the device tree, install
 * tc_secure_notify as its handler, and initialize the global TA
 * callback list. Returns 0 on success or a negative errno.
 */
static int config_spi_context(struct device *class_dev, struct device_node *np)
{
	int ret;

#ifndef CONFIG_ACPI
	if (!np) {
		tloge("device node not found\n");
		return -EINVAL;
	}
#endif

	/* Map IRQ 0 from the OF interrupts list */
#ifdef CONFIG_ACPI
	g_irq = (unsigned int)get_acpi_tz_irq();
#else
	g_irq = irq_of_parse_and_map(np, 0);
#endif
	/* IRQF_NO_SUSPEND: TEE notifications must survive system suspend */
	ret = devm_request_irq(class_dev, g_irq, tc_secure_notify,
		IRQF_NO_SUSPEND, TC_NS_CLIENT_DEV, NULL);
	if (ret < 0) {
		tloge("device irq %u request failed %d", g_irq, ret);
		return ret;
	}

	g_ta_callback_func_list.callback_count = 0;
	INIT_LIST_HEAD(&g_ta_callback_func_list.callback_list);
	mutex_init(&g_ta_callback_func_list.callback_list_lock);

	return 0;
}
665
/*
 * Initialize the SPI notification path: create the workqueue, hook the
 * interrupt, allocate/map the shared notify page and register it with
 * TEE via SMC. On any failure everything is torn down via free_tz_spi.
 *
 * class_dev: driver device (required); np: DT node (may be NULL when
 * ACPI supplies the IRQ). Returns 0 on success or a negative errno.
 */
int tz_spi_init(struct device *class_dev, struct device_node *np)
{
	int ret;

	if (!class_dev) /* here np can be NULL */
		return -EINVAL;

	spin_lock_init(&g_notify_lock);
	/* WQ_UNBOUND | WQ_HIGHPRI: notify work should run promptly on any cpu */
	g_tz_spi_wq = alloc_workqueue("g_tz_spi_wq",
		WQ_UNBOUND | WQ_HIGHPRI, TZ_WQ_MAX_ACTIVE);
	if (!g_tz_spi_wq) {
		tloge("it failed to create workqueue g_tz_spi_wq\n");
		return -ENOMEM;
	}
	tz_workqueue_bind_mask(g_tz_spi_wq, WQ_HIGHPRI);

	ret = config_spi_context(class_dev, np);
	if (ret != 0)
		goto clean;

	/* allocate and register the shared page only once */
	if (!g_notify_data) {
		g_notify_data = (struct notify_data_struct *)(uintptr_t)get_spi_mem_vaddr();
		if (!g_notify_data) {
			tloge("get free page failed for notification data\n");
			ret = -ENOMEM;
			goto clean;
		}

		ret = send_notify_cmd(GLOBAL_CMD_ID_REGISTER_NOTIFY_MEMORY);
		if (ret != 0) {
			tloge("shared memory failed ret is 0x%x\n", ret);
			ret = -EFAULT;
			free_spi_mem((uint64_t)(uintptr_t)g_notify_data);
			g_notify_data = NULL;
			goto clean;
		}

		/* cache the shadow entry slot for later use */
		g_notify_data_entry_shadow =
			&g_notify_data->entry[NOTIFY_DATA_ENTRY_SHADOW - 1];
		tlogd("target is: %llx\n",
			g_notify_data_entry_shadow->context.shadow.target_tcb);
	}

	return 0;
clean:
	free_tz_spi(class_dev);
	return ret;
}
714
free_tz_spi(struct device * class_dev)715 void free_tz_spi(struct device *class_dev)
716 {
717 if (g_notify_data) {
718 free_spi_mem((uint64_t)(uintptr_t)g_notify_data);
719 g_notify_data = NULL;
720 }
721
722 if (g_tz_spi_wq) {
723 flush_workqueue(g_tz_spi_wq);
724 destroy_workqueue(g_tz_spi_wq);
725 g_tz_spi_wq = NULL;
726 }
727 if (!class_dev)
728 return;
729
730 devm_free_irq(class_dev, g_irq, NULL);
731 }
732