// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/nospec.h>

#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"
/* Test the link power state and send a MUX command in blocking mode. */
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
                               size_t size)
{
        struct iosm_mux *ipc_mux = ipc_imem->mux;
        const struct mux_acb *acb = msg;

        skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
        ipc_imem_ul_send(ipc_mux->imem);

        return 0;
}

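/* Queue the prepared ACB for uplink transfer via the tasklet context and,
 * if requested, block until the modem acknowledges the command or the
 * timeout expires.
 */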
static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
        struct completion *completion = &ipc_mux->channel->ul_sem;
        int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
                                           0, &ipc_mux->acb,
                                           sizeof(ipc_mux->acb), false);
        if (ret) {
                dev_err(ipc_mux->dev, "unable to send mux command");
                return ret;
        }

        /* If blocking, suspend the app and wait for the irq in the flash or
         * crash phase. Return -ETIMEDOUT on timeout to indicate failure.
         */
        if (blocking) {
                u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;

                reinit_completion(completion);

                if (!wait_for_completion_interruptible_timeout(
                            completion,
                            msecs_to_jiffies(wait_time_milliseconds))) {
                        dev_err(ipc_mux->dev, "ch[%d] timeout",
                                ipc_mux->channel_id);
                        ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

/* Prepare a MUX command header in the ACB skb. */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
                                                  u32 cmd, struct mux_acb *acb,
                                                  void *param, u32 param_size)
{
        struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;

        cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
        cmdh->command_type = cpu_to_le32(cmd);
        cmdh->if_id = acb->if_id;

        acb->cmd = cmd;

        cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
                                    param_size);
        cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);

        if (param)
                memcpy(&cmdh->param, param, param_size);

        skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));

        return cmdh;
}

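/* Allocate a DMA-mapped uplink skb for the ACB and zero its buffer. */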
static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
        struct mux_acb *acb = &ipc_mux->acb;
        struct sk_buff *skb;
        dma_addr_t mapping;

        /* Allocate skb memory for the uplink buffer. */
        skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
                                 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
        if (!skb)
                return -ENOMEM;

        /* Save the skb address. */
        acb->skb = skb;

        memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);

        return 0;
}

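/* Build a MUX command (or a command response, if respond is set) in a
 * freshly allocated ACB and hand it to the uplink path; the caller chooses
 * whether to block for the acknowledgement.
 */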
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
                             u32 transaction_id, union mux_cmd_param *param,
                             size_t res_size, bool blocking, bool respond)
{
        struct mux_acb *acb = &ipc_mux->acb;
        struct mux_lite_cmdh *ack_lite;
        int ret;

        acb->if_id = if_id;
        ret = ipc_mux_acb_alloc(ipc_mux);
        if (ret)
                return ret;

        ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
                                        res_size);
        if (respond)
                ack_lite->transaction_id = cpu_to_le32(transaction_id);

        return ipc_mux_acb_send(ipc_mux, blocking);
}

void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
        /* Inform the network interface to start/stop flow ctrl */
        ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}

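/* Process a downlink command response: store the returned parameters and
 * complete the waiter blocked in ipc_mux_acb_send().
 */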
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
                                              struct mux_lite_cmdh *cmdh)
{
        struct mux_acb *acb = &ipc_mux->acb;

        switch (le32_to_cpu(cmdh->command_type)) {
        case MUX_CMD_OPEN_SESSION_RESP:
        case MUX_CMD_CLOSE_SESSION_RESP:
                /* Resume the control application. */
                acb->got_param = cmdh->param;
                break;

        case MUX_LITE_CMD_FLOW_CTL_ACK:
                /* This command type is not expected as a response for the
                 * aggregation version of the protocol, so return an error.
                 */
                if (ipc_mux->protocol != MUX_LITE)
                        return -EINVAL;

                dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
                        cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
                break;

        default:
                return -EINVAL;
        }

        acb->wanted_response = MUX_CMD_INVALID;
        acb->got_response = le32_to_cpu(cmdh->command_type);
        complete(&ipc_mux->channel->ul_sem);

        return 0;
}

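/* Process a modem-initiated downlink command such as a flow control
 * request or a link status report.
 */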
static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
                                            struct mux_lite_cmdh *cmdh)
{
        union mux_cmd_param *param = &cmdh->param;
        struct mux_session *session;
        int new_size;

        dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
                cmdh->if_id, le32_to_cpu(cmdh->command_type));

        switch (le32_to_cpu(cmdh->command_type)) {
        case MUX_LITE_CMD_FLOW_CTL:

                if (cmdh->if_id >= ipc_mux->nr_sessions) {
                        dev_err(ipc_mux->dev, "if_id [%d] not valid",
                                cmdh->if_id);
                        return -EINVAL; /* No session interface id. */
                }

                session = &ipc_mux->session[cmdh->if_id];

                new_size = offsetof(struct mux_lite_cmdh, param) +
                           sizeof(param->flow_ctl);
                if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
                        /* Backward compatibility */
                        if (cmdh->cmd_len == cpu_to_le16(new_size))
                                session->flow_ctl_mask =
                                        le32_to_cpu(param->flow_ctl.mask);
                        else
                                session->flow_ctl_mask = ~0;
                        /* If CP asks for flow control enable, set our
                         * internal flow control Tx flag to limit uplink
                         * session queueing.
                         */
                        session->net_tx_stop = true;
                        /* Update the stats */
                        session->flow_ctl_en_cnt++;
                } else if (param->flow_ctl.mask == 0) {
                        /* Just reset the flow control mask and let
                         * mux_flow_ctrl_low_thre_b take control of our
                         * internal Tx flag and of enabling kernel flow
                         * control.
                         */
                        /* Backward compatibility */
                        if (cmdh->cmd_len == cpu_to_le16(new_size))
                                session->flow_ctl_mask =
                                        le32_to_cpu(param->flow_ctl.mask);
                        else
                                session->flow_ctl_mask = 0;
                        /* Update the stats */
                        session->flow_ctl_dis_cnt++;
                } else {
                        break;
                }

                dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
                        le32_to_cpu(param->flow_ctl.mask));
                break;

        case MUX_LITE_CMD_LINK_STATUS_REPORT:
                break;

        default:
                return -EINVAL;
        }
        return 0;
}

/* Decode a command block and send the appropriate response. */
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
        __le32 trans_id = cmdh->transaction_id;

        if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
                /* Failing to decode a command response indicates that the
                 * cmd_type may be a command instead of a response, so try
                 * to decode it as a command.
                 */
                if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
                        /* The decoded command may need a response. Give the
                         * response according to the command type.
                         */
                        union mux_cmd_param *mux_cmd = NULL;
                        size_t size = 0;
                        u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;

                        if (cmdh->command_type ==
                            cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
                                mux_cmd = &cmdh->param;
                                mux_cmd->link_status_resp.response =
                                        cpu_to_le32(MUX_CMD_RESP_SUCCESS);
                                /* response field is u32 */
                                size = sizeof(u32);
                        } else if (cmdh->command_type ==
                                   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
                                cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
                        } else {
                                return;
                        }

                        if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
                                                     le32_to_cpu(trans_id),
                                                     mux_cmd, size, false,
                                                     true))
                                dev_err(ipc_mux->dev,
                                        "if_id %d: cmd send failed",
                                        cmdh->if_id);
                }
        }
}

/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
                               struct iosm_wwan *wwan, u32 offset,
                               u8 service_class, struct sk_buff *skb)
{
        struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);

        if (!dest_skb)
                return -ENOMEM;

        skb_pull(dest_skb, offset);
        skb_set_tail_pointer(dest_skb, dest_skb->len);
        /* Pass the packet to the netif layer. */
        dest_skb->priority = service_class;

        return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}

/* Decode a Flow Credit Table in the block. */
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
                                   unsigned char *block)
{
        struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
        struct iosm_wwan *wwan;
        int ul_credits;
        int if_id;

        if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
                dev_err(ipc_mux->dev, "unexpected FCT length: %d",
                        fct->vfl_length);
                return;
        }

        if_id = fct->if_id;
        if (if_id >= ipc_mux->nr_sessions) {
                dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
                return;
        }

        /* Is the session active? */
        if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
        wwan = ipc_mux->session[if_id].wwan;
        if (!wwan) {
                dev_err(ipc_mux->dev, "session Net ID is NULL");
                return;
        }

        ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);

        dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
                if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);

        /* Update the flow credit information from the ADB. */
        ipc_mux->session[if_id].ul_flow_credits += ul_credits;

        /* Check whether TX can be started. */
        if (ipc_mux->session[if_id].ul_flow_credits > 0) {
                ipc_mux->session[if_id].net_tx_stop = false;
                ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
                                          ipc_mux->session[if_id].if_id, false);
        }
}

/* Decode a non-aggregated datagram. */
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
                                   struct sk_buff *skb)
{
        u32 pad_len, packet_offset;
        struct iosm_wwan *wwan;
        struct mux_adgh *adgh;
        u8 *block = skb->data;
        int rc = 0;
        u8 if_id;

        adgh = (struct mux_adgh *)block;

        if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
                dev_err(ipc_mux->dev, "invalid ADGH signature received");
                return;
        }

        if_id = adgh->if_id;
        if (if_id >= ipc_mux->nr_sessions) {
                dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
                return;
        }

        /* Is the session active? */
        if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
        wwan = ipc_mux->session[if_id].wwan;
        if (!wwan) {
                dev_err(ipc_mux->dev, "session Net ID is NULL");
                return;
        }

        /* Store the pad length for the corresponding session: the pad bytes
         * negotiated in the open session less the header size (see the
         * session management chapter for details). If the resulting padding
         * is zero or less, the additional head padding is omitted. For
         * example, if HEAD_PAD_LEN is 16 or less, this field is omitted;
         * if HEAD_PAD_LEN is 20, this field holds 4 bytes set to zero.
         */
        pad_len =
                ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
        packet_offset = sizeof(*adgh) + pad_len;

        if_id += ipc_mux->wwan_q_offset;

        /* Pass the packet to the netif layer. */
        rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
                                 adgh->service_class, skb);
        if (rc) {
                dev_err(ipc_mux->dev, "mux adgh decoding error");
                return;
        }
        ipc_mux->session[if_id].flush = 1;
}

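/* Dispatch a downlink MUX block by its signature (ADGH, FCTH or CMDH) and
 * free the skb afterwards.
 */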
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        u32 signature;

        if (!skb->data)
                return;

        /* Decode the MUX header type. */
        signature = le32_to_cpup((__le32 *)skb->data);

        switch (signature) {
        case MUX_SIG_ADGH:
                ipc_mux_dl_adgh_decode(ipc_mux, skb);
                break;

        case MUX_SIG_FCTH:
                ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
                break;

        case MUX_SIG_CMDH:
                ipc_mux_dl_cmd_decode(ipc_mux, skb);
                break;

        default:
                dev_err(ipc_mux->dev, "invalid ABH signature");
        }

        ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}

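/* Take an skb from the ADB free list and initialize it either as an ADGH
 * destination buffer or as a queue level table (QLT), depending on the
 * requested signature type.
 */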
static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
                                struct mux_adb *ul_adb, u32 type)
{
        /* Take the first element of the free list. */
        struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
        int qlt_size;

        if (!skb)
                return -EBUSY; /* Wait for a free ADB skb. */

        /* Mark it as UL ADB to select the right free operation. */
        IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;

        switch (type) {
        case MUX_SIG_ADGH:
                /* Save the ADB memory settings. */
                ul_adb->dest_skb = skb;
                ul_adb->buf = skb->data;
                ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
                /* Reset the statistic counters. */
                ul_adb->if_cnt = 0;
                ul_adb->payload_size = 0;
                ul_adb->dg_cnt_total = 0;

                ul_adb->adgh = (struct mux_adgh *)skb->data;
                memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
                break;

        case MUX_SIG_QLTH:
                qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
                           (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));

                if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
                        dev_err(ipc_mux->dev,
                                "can't support. QLT size:%d SKB size: %d",
                                qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
                        return -ERANGE;
                }

                ul_adb->qlth_skb = skb;
                memset(ul_adb->qlth_skb->data, 0, qlt_size);
                skb_put(skb, qlt_size);
                break;
        }

        return 0;
}

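/* Queue the finished ADGH buffer on the channel uplink list and update the
 * flow-control byte accounting.
 */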
static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
        struct mux_adb *ul_adb = &ipc_mux->ul_adb;
        u16 adgh_len;
        long long bytes;
        char *str;

        if (!ul_adb->dest_skb) {
                dev_err(ipc_mux->dev, "no dest skb");
                return;
        }

        adgh_len = le16_to_cpu(ul_adb->adgh->length);
        skb_put(ul_adb->dest_skb, adgh_len);
        skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
        ul_adb->dest_skb = NULL;

        if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
                struct mux_session *session;

                session = &ipc_mux->session[ul_adb->adgh->if_id];
                str = "available_credits";
                bytes = (long long)session->ul_flow_credits;

        } else {
                str = "pend_bytes";
                bytes = ipc_mux->ul_data_pend_bytes;
                ipc_mux->ul_data_pend_bytes += adgh_len;
        }

        dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
                adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
                str, bytes);
}

/* Allocate an ADB from the free list and initialize it with the ADBH. */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
                                    struct mux_adb *adb, int *size_needed,
                                    u32 type)
{
        bool ret_val = false;
        int status;

        if (!adb->dest_skb) {
                /* Allocate memory for the ADB including the datagram
                 * table header.
                 */
                status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
                if (status)
                        /* No free ADB skb is available. */
                        ret_val = true;

                /* Reset the needed size; this is only done for new ADB
                 * memory.
                 */
                *size_needed = 0;
        }

        return ret_val;
}

/* Inform the network stack to stop sending further packets for all opened
 * sessions.
 */
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
        struct mux_session *session;
        int idx;

        for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
                session = &ipc_mux->session[idx];

                if (!session->wwan)
                        continue;

                session->net_tx_stop = true;
        }
}

/* Send the Queue Level Table of all opened sessions. */
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
        struct ipc_mem_lite_gen_tbl *qlt;
        struct mux_session *session;
        bool qlt_updated = false;
        int i;
        int qlt_size;

        if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
                return qlt_updated;

        qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
                   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);

        for (i = 0; i < ipc_mux->nr_sessions; i++) {
                session = &ipc_mux->session[i];

                if (!session->wwan || session->flow_ctl_mask)
                        continue;

                if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
                                         MUX_SIG_QLTH)) {
                        dev_err(ipc_mux->dev,
                                "no reserved mem to send QLT of if_id: %d", i);
                        break;
                }

                /* Prepare the QLT. */
                qlt = (struct ipc_mem_lite_gen_tbl *)
                              ipc_mux->ul_adb.qlth_skb->data;
                qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
                qlt->length = cpu_to_le16(qlt_size);
                qlt->if_id = i;
                qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
                qlt->reserved[0] = 0;
                qlt->reserved[1] = 0;

                qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);

                /* Add the QLT to the transfer list. */
                skb_queue_tail(&ipc_mux->channel->ul_list,
                               ipc_mux->ul_adb.qlth_skb);

                qlt_updated = true;
                ipc_mux->ul_adb.qlth_skb = NULL;
        }

        if (qlt_updated)
                /* Update the TDs with the ul_list. */
                (void)ipc_imem_ul_write_td(ipc_mux->imem);

        return qlt_updated;
}

/* Check the available credits for the specified session and return the
 * number of packets for which credits are available.
 */
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
                                          struct mux_session *session,
                                          struct sk_buff_head *ul_list,
                                          int max_nr_of_pkts)
{
        int pkts_to_send = 0;
        struct sk_buff *skb;
        int credits = 0;

        if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
                credits = session->ul_flow_credits;
                if (credits <= 0) {
                        dev_dbg(ipc_mux->dev,
                                "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
                                session->if_id, session->ul_flow_credits,
                                session->ul_list.qlen); /* nr_of_bytes */
                        return 0;
                }
        } else {
                credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
                          ipc_mux->ul_data_pend_bytes;
                if (credits <= 0) {
                        ipc_mux_stop_tx_for_all_sessions(ipc_mux);

                        dev_dbg(ipc_mux->dev,
                                "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
                                session->if_id, ipc_mux->ul_data_pend_bytes,
                                IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
                        return 0;
                }
        }

        /* Check if there are enough credits/bytes available to send the
         * requested max_nr_of_pkts. Otherwise, restrict the number of packets
         * according to the available credits.
         */
        skb_queue_walk(ul_list, skb) {
                if (credits < skb->len || pkts_to_send >= max_nr_of_pkts)
                        break;
                credits -= skb->len;
                pkts_to_send++;
        }

        return pkts_to_send;
}

/* Encode the UL IP packets according to the MUX Lite spec. */
static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
                                  struct mux_session *session,
                                  struct sk_buff_head *ul_list,
                                  struct mux_adb *adb, int nr_of_pkts)
{
        int offset = sizeof(struct mux_adgh);
        int adb_updated = -EINVAL;
        struct sk_buff *src_skb;
        int aligned_size = 0;
        int nr_of_skb = 0;
        u32 pad_len = 0;

        /* Re-calculate the number of packets depending on the number of
         * bytes to be processed/available credits.
         */
        nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
                                                    nr_of_pkts);

        /* If the nr_of_pkts calculated from the available credits is <= 0,
         * there is nothing to do.
         */
        if (nr_of_pkts <= 0)
                return 0;

        /* Read the configured UL head_pad_len for the session. */
        if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
                pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

        /* Process all pending UL packets for this session
         * depending on the allocated datagram table size.
         */
        while (nr_of_pkts > 0) {
                /* Get a destination skb allocated. */
                if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
                                            MUX_SIG_ADGH)) {
                        dev_err(ipc_mux->dev, "no reserved memory for ADGH");
                        return -ENOMEM;
                }

                /* Peek at the head of the list. */
                src_skb = skb_peek(ul_list);
                if (!src_skb) {
                        dev_err(ipc_mux->dev,
                                "skb peek returned NULL with count: %d",
                                nr_of_pkts);
                        break;
                }

                /* Calculate the memory value. */
                aligned_size = ALIGN((pad_len + src_skb->len), 4);

                ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;

                if (ipc_mux->size_needed > adb->size) {
                        dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
                                ipc_mux->size_needed, adb->size);
                        /* Return 1 if any IP packet is added to the transfer
                         * list.
                         */
                        return nr_of_skb ? 1 : 0;
                }

                /* Add the buffer (without head padding) to the next pending
                 * transfer.
                 */
                memcpy(adb->buf + offset + pad_len, src_skb->data,
                       src_skb->len);

                adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
                adb->adgh->if_id = session_id;
                adb->adgh->length =
                        cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
                                    src_skb->len);
                adb->adgh->service_class = src_skb->priority;
                adb->adgh->next_count = --nr_of_pkts;
                adb->dg_cnt_total++;
                adb->payload_size += src_skb->len;

                if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
                        /* Decrement the credit value as we are processing the
                         * datagram from the UL list.
                         */
                        session->ul_flow_credits -= src_skb->len;

                /* Remove the processed element and free it. */
                src_skb = skb_dequeue(ul_list);
                dev_kfree_skb(src_skb);
                nr_of_skb++;

                ipc_mux_ul_adgh_finish(ipc_mux);
        }

        if (nr_of_skb) {
                /* In the MUX Lite case, send the QLT info to the modem if
                 * the pending bytes are above the low watermark.
                 */
                if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
                    ipc_mux->ul_data_pend_bytes >=
                            IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
                        adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
                else
                        adb_updated = 1;

                /* Update the TDs with the ul_list. */
                (void)ipc_imem_ul_write_td(ipc_mux->imem);
        }

        return adb_updated;
}

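/* Walk all sessions round-robin and encode their pending UL packets into
 * ADGH frames.
 */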
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
        struct sk_buff_head *ul_list;
        struct mux_session *session;
        int updated = 0;
        int session_id;
        int dg_n;
        int i;

        if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
            ipc_mux->adb_prep_ongoing)
                return false;

        ipc_mux->adb_prep_ongoing = true;

        for (i = 0; i < ipc_mux->nr_sessions; i++) {
                session_id = ipc_mux->rr_next_session;
                session = &ipc_mux->session[session_id];

                /* Go to the next session and handle the rr_next_session
                 * wrap-around.
                 */
                ipc_mux->rr_next_session++;
                if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions)
                        ipc_mux->rr_next_session = 0;

                if (!session->wwan || session->flow_ctl_mask ||
                    session->net_tx_stop)
                        continue;

                ul_list = &session->ul_list;

                /* Is something pending in UL and flow ctrl off? */
                dg_n = skb_queue_len(ul_list);
                if (dg_n > MUX_MAX_UL_DG_ENTRIES)
                        dg_n = MUX_MAX_UL_DG_ENTRIES;

                if (dg_n == 0)
                        /* Nothing to do for this ipc_mux session,
                         * so try the next session id.
                         */
                        continue;

                updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
                                                 ul_list, &ipc_mux->ul_adb,
                                                 dg_n);
        }

        ipc_mux->adb_prep_ongoing = false;
        return updated == 1;
}

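/* Reclaim a transmitted UL ADB skb: update the pending byte count and
 * return the buffer to the ADB free list.
 */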
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
        struct mux_adgh *adgh;
        u16 adgh_len;

        adgh = (struct mux_adgh *)skb->data;
        adgh_len = le16_to_cpu(adgh->length);

        if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
            ipc_mux->ul_flow == MUX_UL)
                ipc_mux->ul_data_pend_bytes -= adgh_len;

        if (ipc_mux->ul_flow == MUX_UL)
                dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
                        ipc_mux->ul_data_pend_bytes);

        /* Reset the skb settings. */
        skb_trim(skb, 0);

        /* Add the consumed ADB to the free list. */
        skb_queue_tail(&ipc_mux->ul_adb.free_list, skb);
}

/* Start the NETIF uplink send transfer in MUX mode. */
static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
                                        void *msg, size_t size)
{
        struct iosm_mux *ipc_mux = ipc_imem->mux;
        bool ul_data_pend = false;

        /* Add the session UL data to an ADB and ADGH. */
        ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
        if (ul_data_pend)
                /* Delay the doorbell irq. */
                ipc_imem_td_update_timer_start(ipc_mux->imem);

        /* Reset the debounce flag. */
        ipc_mux->ev_mux_net_transmit_pending = false;

        return 0;
}

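/* Queue a netif UL packet on the session list and kick the IPC task to
 * encode it; session flow control is applied if the queue grows too long.
 */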
int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
                              struct sk_buff *skb)
{
        struct mux_session *session = &ipc_mux->session[if_id];
        int ret = -EINVAL;

        if (ipc_mux->channel &&
            ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
                dev_err(ipc_mux->dev,
                        "channel state is not IMEM_CHANNEL_ACTIVE");
                goto out;
        }

        if (!session->wwan) {
                dev_err(ipc_mux->dev, "session net ID is NULL");
                ret = -EFAULT;
                goto out;
        }

        /* The session is under flow control. Check whether the packet can
         * still be queued in the session list; if not, suspend net tx.
         */
        if (skb_queue_len(&session->ul_list) >=
            (session->net_tx_stop ?
                     IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
                     (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
                      IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
                ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
                ret = -EBUSY;
                goto out;
        }

        /* Add the skb to the uplink skb accumulator. */
        skb_queue_tail(&session->ul_list, skb);

        /* Inform the IPC kthread to pass uplink IP packets to CP. */
        if (!ipc_mux->ev_mux_net_transmit_pending) {
                ipc_mux->ev_mux_net_transmit_pending = true;
                ret = ipc_task_queue_send_task(ipc_mux->imem,
                                               ipc_mux_tq_ul_trigger_encode, 0,
                                               NULL, 0, false);
                if (ret)
                        goto out;
        }
        dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
                if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
                skb->len, skb->truesize, skb->priority);
        ret = 0;
out:
        return ret;
}