// Copyright (C) 2022 Beken Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdio.h>
#include <string.h>

#include <os/os.h>
#include "mailbox_channel.h"
#include "mailbox_driver_base.h"

#define MB_PHY_CMD_CHNL     (MAILBOX_BOX0)
#define MB_PHY_ACK_CHNL     (MAILBOX_BOX1)

#define CHNL_STATE_BUSY     1
#define CHNL_STATE_ILDE     0

typedef struct
{
    u8     tx_state;        /* physical channel tx state. */
    u8     tx_seq;          /* physical channel tx sequence. */
    u8     tx_log_chnl_id;  /* logical channel id. */
    u32    rx_fault_cnt;
    u32    tx_fault_cnt;
} mb_phy_chnl_cb_t;

#define CHNL_CTRL_MASK      0xF

/*
 * There are 2 boxes in one MAILBOXn HW, but the current mailbox_driver design
 * gives no way to know which box a received msg came from.
 * So the CHNL_CTRL_ACK_BOX bit in the msg hdr.ctrl is used to distinguish the source:
 * when CHNL_CTRL_ACK_BOX is set, the msg came from the ack box ( MAILBOXn_BOX1 ).
 */
#define CHNL_CTRL_ACK_BOX   0x01

#define CHNL_CTRL_SYNC_TX   0x02

typedef union
{
    struct
    {
        u32    cmd          : 8;
        u32    state        : 4;
        u32    ctrl         : 4;
        u32    tx_seq       : 8;
        u32    logical_chnl : 8;
    };
    u32    data;
} phy_chnnl_hdr_t;
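
/*
 * The header above packs cmd / state / ctrl / tx_seq / logical_chnl into a single
 * 32-bit word; 'data' gives raw access to that word (see mb_phy_chnl_rx_cmd_isr).
 * Through mb_phy_chnl_union_t below, this header overlays the first word (param0)
 * of the raw mailbox_data_t payload, while the remaining words carry the cmd/ack data.
 */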

typedef struct
{
    phy_chnnl_hdr_t    hdr;

    u32    param1;
    u32    param2;
    u32    param3;
} mb_phy_chnl_cmd_t;

typedef struct
{
    phy_chnnl_hdr_t    hdr;

    u32    data1;
    u32    data2;
    u32    data3;
} mb_phy_chnl_ack_t;

typedef union
{
    phy_chnnl_hdr_t      phy_ch_hdr;
    mb_phy_chnl_cmd_t    phy_ch_cmd;
    mb_phy_chnl_ack_t    phy_ch_ack;
    mailbox_data_t       mb_data;
} mb_phy_chnl_union_t;
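
/*
 * Different views of the same 4-word mailbox payload: the rx path receives a raw
 * mailbox_data_t and re-interprets it as a cmd or an ack depending on the
 * CHNL_CTRL_ACK_BOX bit in the header (see mb_phy_chnl_rx_isr).
 */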

typedef struct
{
    u8     log_chnl_id;  /* logical channel id. */
    u8     tx_state;     /* logical channel tx state. */
    u8     in_used;
    chnl_rx_isr_t         rx_isr;
    chnl_tx_isr_t         tx_isr;
    chnl_tx_cmpl_isr_t    tx_cmpl_isr;
    void                * isr_param;
    mailbox_data_t        chnnl_tx_buff;  /* logical channel tx buffer. */
} mb_log_chnl_cb_t;

static mb_phy_chnl_cb_t    phy_chnl_cb;
static mb_log_chnl_cb_t    log_chnl_cb[MB_LOG_CHNL_NUM];

static u8    mb_chnnl_init_ok = 0;

/* ===================== physical channel functions ==================*/
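
/*
 * TX flow: mb_chnl_write() copies the cmd into the logical channel tx buffer and marks
 * the logical channel BUSY, then mb_phy_chnl_start_tx() claims the physical channel and
 * mb_phy_chnl_tx_cmd() sends the buffer to the peer CPU through the CMD box (BOX0).
 * When the peer's ACK arrives through the ACK box (BOX1), mb_phy_chnl_rx_ack_isr() calls
 * the channel's tx_cmpl_isr, then scans log_chnl_cb[] from id 0 upward and sends the
 * next pending logical channel's cmd (lower id = higher priority), or sets the physical
 * channel back to IDLE if nothing is pending.
 * RX flow: mb_phy_chnl_rx_isr() dispatches received cmds to the target channel's rx_isr
 * via mb_phy_chnl_rx_cmd_isr(), which then re-uses the same buffer to send back the ACK.
 */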

static u8 mb_phy_chnl_tx_cmd(u8 log_chnl)
{
    mb_phy_chnl_cmd_t * cmd_ptr;
    bk_err_t    ret_code;
    u16         chnl_type;

    if(log_chnl >= MB_LOG_CHNL_NUM)
        return 1;

    phy_chnl_cb.tx_seq++;
    phy_chnl_cb.tx_log_chnl_id = log_chnl;

    cmd_ptr = (mb_phy_chnl_cmd_t *)&log_chnl_cb[log_chnl].chnnl_tx_buff;

    cmd_ptr->hdr.logical_chnl = log_chnl;
    cmd_ptr->hdr.tx_seq = phy_chnl_cb.tx_seq;
    cmd_ptr->hdr.ctrl  = 0;
    cmd_ptr->hdr.state = 0;

    chnl_type = MB_PHY_CMD_CHNL;

    ret_code = bk_mailbox_send(&log_chnl_cb[log_chnl].chnnl_tx_buff, SRC_CPU, DST_CPU, (void *)&chnl_type);

    if(ret_code != BK_OK)
    {
        phy_chnl_cb.tx_fault_cnt++;

        return 2;
    }

    log_chnl_cb[log_chnl].tx_state = CHNL_STATE_ILDE;

    if(log_chnl_cb[log_chnl].tx_isr != NULL)
    {
        log_chnl_cb[log_chnl].tx_isr(log_chnl_cb[log_chnl].isr_param);  /* phy_chnl is BUSY now, tx_isr will not trigger phy_chnl_start_tx. */
    }

    return 0;
}

static void mb_phy_chnl_rx_ack_isr(mb_phy_chnl_ack_t *ack_ptr)
{
    u8    log_chnl;
    u8    ret_code;

    log_chnl = ack_ptr->hdr.logical_chnl;

    if( (log_chnl != phy_chnl_cb.tx_log_chnl_id) ||
        (ack_ptr->hdr.tx_seq != phy_chnl_cb.tx_seq) )
    {
        phy_chnl_cb.rx_fault_cnt++;

        return;
    }

    if(log_chnl_cb[log_chnl].tx_cmpl_isr != NULL)
    {
        /* clear the following header members. */
        ack_ptr->hdr.logical_chnl = 0;
        ack_ptr->hdr.tx_seq = 0;
        ack_ptr->hdr.ctrl   = 0;

        /* hdr.state and hdr.cmd are left untouched. */

        log_chnl_cb[log_chnl].tx_cmpl_isr(log_chnl_cb[log_chnl].isr_param, (mb_chnl_ack_t *)ack_ptr);
    }

    for(log_chnl = 0; log_chnl < MB_LOG_CHNL_NUM; log_chnl++)  /* priority-descended search: lower channel id has higher priority. */
    {
        if(log_chnl_cb[log_chnl].tx_state != CHNL_STATE_ILDE)
            break;
    }

    if(log_chnl >= MB_LOG_CHNL_NUM)
    {
        /* no pending logical channel, the physical channel becomes idle. */
        phy_chnl_cb.tx_state = CHNL_STATE_ILDE;
        return;
    }

    ret_code = mb_phy_chnl_tx_cmd(log_chnl);

    if(ret_code != 0)
    {
        phy_chnl_cb.tx_state = CHNL_STATE_ILDE;
        return;
    }

    return;
}

static void mb_phy_chnl_rx_cmd_isr(mb_phy_chnl_cmd_t *cmd_ptr)
{
    phy_chnnl_hdr_t    chnl_hdr;
    u8          log_chnl = cmd_ptr->hdr.logical_chnl;
    bk_err_t    ret_code;
    u16         chnl_type;

    chnl_hdr.data = cmd_ptr->hdr.data;

    if(log_chnl >= MB_LOG_CHNL_NUM)
    {
        phy_chnl_cb.rx_fault_cnt++;

        return;
    }

    if(log_chnl_cb[log_chnl].rx_isr != NULL)
    {
        /* clear all other hdr members except hdr.cmd. */
        cmd_ptr->hdr.logical_chnl = 0;
        cmd_ptr->hdr.tx_seq = 0;
        cmd_ptr->hdr.ctrl   = 0;
        cmd_ptr->hdr.state  = 0;

        log_chnl_cb[log_chnl].rx_isr(log_chnl_cb[log_chnl].isr_param, (mb_chnl_cmd_t *)cmd_ptr);

        /* !!!! the cmd_ptr buffer now contains ACK data !!!! */
    }
    else
    {
        chnl_hdr.state |= CHNL_STATE_COM_FAIL;  /* cmd has no target app; this state bit is reported back to the peer CPU in the ACK. */
    }

    if(chnl_hdr.ctrl & CHNL_CTRL_SYNC_TX)
    {
        /* sync tx cmd, do NOT send an ACK. */
        return;
    }

    /* mb_phy_chnl_tx_ack. */

    /* RE-USE the cmd buffer for the ACK !!! */
    cmd_ptr->hdr.data  = chnl_hdr.data;
    cmd_ptr->hdr.ctrl |= CHNL_CTRL_ACK_BOX;  /* ACK msg, use the ACK channel. */

    if(cmd_ptr->hdr.ctrl & CHNL_CTRL_ACK_BOX)
        chnl_type = MB_PHY_ACK_CHNL;
    else
        chnl_type = MB_PHY_CMD_CHNL;

    ret_code = bk_mailbox_send((mailbox_data_t *)cmd_ptr, SRC_CPU, DST_CPU, (void *)&chnl_type);  /* mb_phy_chnl_tx_ack. */

    if(ret_code != BK_OK)
    {
        phy_chnl_cb.tx_fault_cnt++;

        return;
    }

    return;
}

static void mb_phy_chnl_rx_isr(mailbox_data_t * mb_data)
{
    mb_phy_chnl_union_t    rx_data;

    /* the following processing modifies the message in place,
       so work on a copy instead of the original input parameter. */
    rx_data.mb_data.param0 = mb_data->param0;
    rx_data.mb_data.param1 = mb_data->param1;
    rx_data.mb_data.param2 = mb_data->param2;
    rx_data.mb_data.param3 = mb_data->param3;

    /*
     * there are 2 boxes in one MAILBOXn HW, but the current mailbox_driver design
     * gives no way to know which box this msg came from,
     * so the CHNL_CTRL_ACK_BOX bit in the msg hdr.ctrl is used to distinguish the source.
     * when CHNL_CTRL_ACK_BOX is set, the msg came from the ack box ( MAILBOXn_BOX1 ).
     */
    if(rx_data.phy_ch_hdr.ctrl & CHNL_CTRL_ACK_BOX)  /* rx ack. */
    {
        mb_phy_chnl_rx_ack_isr(&rx_data.phy_ch_ack);
    }
    else  /* rx cmd. */
    {
        mb_phy_chnl_rx_cmd_isr(&rx_data.phy_ch_cmd);
    }
}

static void mb_phy_chnl_start_tx(u8 log_chnl)
{
    u32    int_mask;
    u8     ret_code;

    int_mask = rtos_disable_int();

    if(phy_chnl_cb.tx_state == CHNL_STATE_ILDE)
    {
        phy_chnl_cb.tx_state = CHNL_STATE_BUSY;  /* MUST set the channel state to BUSY first. */
        /* start_tx -> tx_cmd -> tx_isr callback -> mb_chnl_write -> start_tx forms a loop;
           setting phy_chnl_cb.tx_state to BUSY before sending breaks the loop. */

        ret_code = mb_phy_chnl_tx_cmd(log_chnl);

        if(ret_code != 0)
        {
            phy_chnl_cb.tx_state = CHNL_STATE_ILDE;
        }
    }

    rtos_enable_int(int_mask);

    return;
}

static bk_err_t mb_phy_chnl_tx_cmd_sync(u8 log_chnl, mb_phy_chnl_cmd_t *cmd_ptr)
{
    bk_err_t    ret_code;
    u16         chnl_type;

    cmd_ptr->hdr.logical_chnl = log_chnl;
    cmd_ptr->hdr.tx_seq = 0;
    cmd_ptr->hdr.ctrl   = CHNL_CTRL_SYNC_TX;  /* the peer does not send an ACK for a sync tx cmd. */
    cmd_ptr->hdr.state  = 0;

    chnl_type = MB_PHY_CMD_CHNL;

    /*
     * can't wait for 'phy_chnl_cb.tx_state' to become CHNL_STATE_ILDE here:
     * it is only set back to CHNL_STATE_ILDE in the interrupt callback,
     * and interrupts may be disabled when this API is called.
     * so wait for the physical channel HW to become IDLE by <POLLing> !!
     */
    while(1)
    {
        ret_code = bk_mailbox_send((mailbox_data_t *)cmd_ptr, SRC_CPU, DST_CPU, (void *)&chnl_type);

        if(ret_code != BK_ERR_MAILBOX_TIMEOUT)
        {
            break;
        }
    }

    return ret_code;
}

/* ===================== logical channel APIs ==================*/
/*
 * init logical channel module.
 * return:
 *    succeed: BK_OK;
 *    failed : fail code.
 *
 */
bk_err_t mb_chnl_init(void)
{
    bk_err_t    ret_code;
    int         i;

    if(mb_chnnl_init_ok)
    {
        return BK_OK;
    }

    memset(&phy_chnl_cb, 0, sizeof(phy_chnl_cb));
    phy_chnl_cb.tx_state = CHNL_STATE_ILDE;

    memset(&log_chnl_cb, 0, sizeof(log_chnl_cb));
    for(i = 0; i < MB_LOG_CHNL_NUM; i++)
    {
        log_chnl_cb[i].log_chnl_id = i;
        log_chnl_cb[i].tx_state = CHNL_STATE_ILDE;
    }

    ret_code = bk_mailbox_init();
    if(ret_code != BK_OK)
    {
        return ret_code;
    }

    ret_code = bk_mailbox_recv_callback_register(DST_CPU, SRC_CPU, mb_phy_chnl_rx_isr);
    if(ret_code != BK_OK)
    {
        return ret_code;
    }

    mb_chnnl_init_ok = 1;

    return BK_OK;
}

/*
 * open logical channel.
 * input:
 *    log_chnl       : logical channel id to open.
 *    callback_param : param passed to all callbacks.
 * return:
 *    succeed: BK_OK;
 *    failed : fail code.
 *
 */
bk_err_t mb_chnl_open(u8 log_chnl, void * callback_param)
{
    if(!mb_chnnl_init_ok)  /* if the driver is not initialized yet. */
    {
        bk_err_t    ret_code;

        ret_code = mb_chnl_init();

        if(ret_code != BK_OK)
        {
            return ret_code;
        }
    }

    if(log_chnl >= MB_LOG_CHNL_NUM)
        return BK_ERR_PARAM;

    if(log_chnl_cb[log_chnl].in_used)
        return BK_ERR_OPEN;

    log_chnl_cb[log_chnl].in_used = 1;  /* channel is now in use. */
    log_chnl_cb[log_chnl].isr_param = callback_param;

    return BK_OK;
}

/*
 * close logical channel.
 * input:
 *    log_chnl : logical channel id to close.
 * return:
 *    succeed: BK_OK;
 *    failed : fail code.
 *
 */
bk_err_t mb_chnl_close(u8 log_chnl)
{
    if(log_chnl >= MB_LOG_CHNL_NUM)
        return BK_ERR_PARAM;

    if(log_chnl_cb[log_chnl].in_used == 0)
        return BK_ERR_STATE;

    log_chnl_cb[log_chnl].in_used = 0;
    log_chnl_cb[log_chnl].tx_state = CHNL_STATE_ILDE;
    log_chnl_cb[log_chnl].rx_isr = NULL;
    log_chnl_cb[log_chnl].tx_isr = NULL;
    log_chnl_cb[log_chnl].tx_cmpl_isr = NULL;

    return BK_OK;
}

/*
 * read from logical channel.
 * not supported: received cmd data is delivered to the channel's rx_isr callback instead.
 * input:
 *    log_chnl : logical channel id to read.
 *    read_buf : buffer to receive channel cmd data.
 * return:
 *    succeed: BK_OK;
 *    failed : fail code.
 *
 */
bk_err_t mb_chnl_read(u8 log_chnl, mb_chnl_cmd_t * read_buf)
{
    return BK_ERR_NOT_SUPPORT;
}

/*
 * write to logical channel.
 * input:
 *    log_chnl : logical channel id to write.
 *    cmd_buf  : buffer of channel cmd data.
 * return:
 *    succeed: BK_OK;
 *    failed : fail code.
 *
 */
bk_err_t mb_chnl_write(u8 log_chnl, mb_chnl_cmd_t * cmd_buf)
{
    u16    write_len;

    if(log_chnl >= MB_LOG_CHNL_NUM)
        return BK_ERR_PARAM;

    if(log_chnl_cb[log_chnl].in_used == 0)
        return BK_ERR_STATE;

    if(log_chnl_cb[log_chnl].tx_state != CHNL_STATE_ILDE)
        return BK_ERR_BUSY;

    write_len = sizeof(mailbox_data_t) > sizeof(mb_chnl_cmd_t) ? sizeof(mb_chnl_cmd_t) : sizeof(mailbox_data_t);

    memcpy(&log_chnl_cb[log_chnl].chnnl_tx_buff, cmd_buf, write_len);

    /* BUSY means there is data in the tx buffer; mb_phy_chnl_rx_ack_isr will pick it up to send. */
    log_chnl_cb[log_chnl].tx_state = CHNL_STATE_BUSY;  /* MUST set to BUSY only after the data has been copied. */

    mb_phy_chnl_start_tx(log_chnl);

    return BK_OK;
}

/*
 * logical channel misc io (set/get param).
 * input:
 *    log_chnl : logical channel id to set/get param.
 *    cmd      : control command for the logical channel.
 *    param    : parameter of the command.
 * return:
 *    succeed: BK_OK;
 *    failed : fail code.
 *
 */
bk_err_t mb_chnl_ctrl(u8 log_chnl, u8 cmd, void * param)
{
    bk_err_t    ret_code;

    if(log_chnl >= MB_LOG_CHNL_NUM)
        return BK_ERR_PARAM;

    if(log_chnl_cb[log_chnl].in_used == 0)
        return BK_ERR_STATE;

    switch(cmd)
    {
        case MB_CHNL_GET_STATUS:
            if(param == NULL)
                return BK_ERR_NULL_PARAM;

            *((u8 *)param) = log_chnl_cb[log_chnl].tx_state;
            break;

        case MB_CHNL_SET_RX_ISR:
            log_chnl_cb[log_chnl].rx_isr = (chnl_rx_isr_t)param;
            break;

        case MB_CHNL_SET_TX_ISR:
            log_chnl_cb[log_chnl].tx_isr = (chnl_tx_isr_t)param;
            break;

        case MB_CHNL_SET_TX_CMPL_ISR:
            log_chnl_cb[log_chnl].tx_cmpl_isr = (chnl_tx_cmpl_isr_t)param;
            break;

        case MB_CHNL_WRITE_SYNC:
            if(param == NULL)
                return BK_ERR_NULL_PARAM;

            ret_code = mb_phy_chnl_tx_cmd_sync(log_chnl, (mb_phy_chnl_cmd_t *)param);
            return ret_code;

        default:
            return BK_ERR_NOT_SUPPORT;
    }

    return BK_OK;
}
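
/*
 * Usage sketch (illustrative only, not part of this driver): a hypothetical client
 * opens a logical channel, registers its callbacks through mb_chnl_ctrl(), then queues
 * a command with mb_chnl_write(). MY_LOG_CHNL and the callback names below are
 * placeholders for the example; the exact callback prototypes and the mb_chnl_cmd_t
 * layout are defined in mailbox_channel.h.
 *
 *     // assumed to match chnl_rx_isr_t / chnl_tx_cmpl_isr_t in mailbox_channel.h.
 *     static void my_rx_isr(void *param, mb_chnl_cmd_t *cmd_buf)      { ... }
 *     static void my_tx_cmpl_isr(void *param, mb_chnl_ack_t *ack_buf) { ... }
 *
 *     void my_chnl_example(void)
 *     {
 *         mb_chnl_cmd_t  cmd_buf;
 *
 *         mb_chnl_open(MY_LOG_CHNL, NULL);
 *         mb_chnl_ctrl(MY_LOG_CHNL, MB_CHNL_SET_RX_ISR, (void *)my_rx_isr);
 *         mb_chnl_ctrl(MY_LOG_CHNL, MB_CHNL_SET_TX_CMPL_ISR, (void *)my_tx_cmpl_isr);
 *
 *         // fill cmd_buf according to mb_chnl_cmd_t in mailbox_channel.h ...
 *         mb_chnl_write(MY_LOG_CHNL, &cmd_buf);
 *     }
 */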