• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, MediaTek Inc.
4  * Copyright (c) 2021-2022, Intel Corporation.
5  *
6  * Authors:
7  *  Amir Hanania <amir.hanania@intel.com>
8  *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
9  *  Haijun Liu <haijun.liu@mediatek.com>
10  *  Moises Veleta <moises.veleta@intel.com>
11  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
12  *
13  * Contributors:
14  *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
15  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
16  *  Eliot Lee <eliot.lee@intel.com>
17  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
18  */
19 
20 #include <linux/atomic.h>
21 #include <linux/bitfield.h>
22 #include <linux/dev_printk.h>
23 #include <linux/err.h>
24 #include <linux/gfp.h>
25 #include <linux/minmax.h>
26 #include <linux/netdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/wwan.h>
31 
32 #include "t7xx_port.h"
33 #include "t7xx_port_proxy.h"
34 #include "t7xx_state_monitor.h"
35 
t7xx_port_ctrl_start(struct wwan_port * port)36 static int t7xx_port_ctrl_start(struct wwan_port *port)
37 {
38 	struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
39 
40 	if (atomic_read(&port_mtk->usage_cnt))
41 		return -EBUSY;
42 
43 	atomic_inc(&port_mtk->usage_cnt);
44 	return 0;
45 }
46 
t7xx_port_ctrl_stop(struct wwan_port * port)47 static void t7xx_port_ctrl_stop(struct wwan_port *port)
48 {
49 	struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
50 
51 	atomic_dec(&port_mtk->usage_cnt);
52 }
53 
t7xx_port_ctrl_tx(struct wwan_port * port,struct sk_buff * skb)54 static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
55 {
56 	struct t7xx_port *port_private = wwan_port_get_drvdata(port);
57 	size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
58 	const struct t7xx_port_conf *port_conf;
59 	struct t7xx_fsm_ctl *ctl;
60 	enum md_state md_state;
61 
62 	len = skb->len;
63 	if (!len || !port_private->chan_enable)
64 		return -EINVAL;
65 
66 	port_conf = port_private->port_conf;
67 	ctl = port_private->t7xx_dev->md->fsm_ctl;
68 	md_state = t7xx_fsm_get_md_state(ctl);
69 	if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
70 		dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
71 			 port_conf->name, md_state);
72 		return -ENODEV;
73 	}
74 
75 	for (offset = 0; offset < len; offset += chunk_len) {
76 		struct sk_buff *skb_ccci;
77 		int ret;
78 
79 		chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
80 		skb_ccci = t7xx_port_alloc_skb(chunk_len);
81 		if (!skb_ccci)
82 			return -ENOMEM;
83 
84 		skb_put_data(skb_ccci, skb->data + offset, chunk_len);
85 		ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
86 		if (ret) {
87 			dev_kfree_skb_any(skb_ccci);
88 			dev_err(port_private->dev, "Write error on %s port, %d\n",
89 				port_conf->name, ret);
90 			return ret;
91 		}
92 	}
93 
94 	dev_kfree_skb(skb);
95 	return 0;
96 }
97 
/* Callbacks handed to wwan_create_port(); the WWAN core invokes these on
 * open, close, and userspace writes respectively.
 */
static const struct wwan_port_ops wwan_ops = {
	.start = t7xx_port_ctrl_start,
	.stop = t7xx_port_ctrl_stop,
	.tx = t7xx_port_ctrl_tx,
};
103 
t7xx_port_wwan_init(struct t7xx_port * port)104 static int t7xx_port_wwan_init(struct t7xx_port *port)
105 {
106 	port->rx_length_th = RX_QUEUE_MAXLEN;
107 	return 0;
108 }
109 
t7xx_port_wwan_uninit(struct t7xx_port * port)110 static void t7xx_port_wwan_uninit(struct t7xx_port *port)
111 {
112 	if (!port->wwan_port)
113 		return;
114 
115 	port->rx_length_th = 0;
116 	wwan_remove_port(port->wwan_port);
117 	port->wwan_port = NULL;
118 }
119 
t7xx_port_wwan_recv_skb(struct t7xx_port * port,struct sk_buff * skb)120 static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
121 {
122 	if (!atomic_read(&port->usage_cnt) || !port->chan_enable) {
123 		const struct t7xx_port_conf *port_conf = port->port_conf;
124 
125 		dev_kfree_skb_any(skb);
126 		dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n",
127 				    port_conf->name);
128 		/* Dropping skb, caller should not access skb.*/
129 		return 0;
130 	}
131 
132 	wwan_port_rx(port->wwan_port, skb);
133 	return 0;
134 }
135 
/* Port-proxy hook: mark the channel as usable for TX/RX.
 * The flag is flipped under port_update_lock; readers (e.g. the TX and
 * RX paths) check chan_enable without the lock. Always returns 0.
 */
static int t7xx_port_wwan_enable_chl(struct t7xx_port *port)
{
	spin_lock(&port->port_update_lock);
	port->chan_enable = true;
	spin_unlock(&port->port_update_lock);

	return 0;
}
144 
/* Port-proxy hook: mark the channel as unusable; subsequent writes fail
 * with -EINVAL and received packets are dropped. Counterpart of
 * t7xx_port_wwan_enable_chl(). Always returns 0.
 */
static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
{
	spin_lock(&port->port_update_lock);
	port->chan_enable = false;
	spin_unlock(&port->port_update_lock);

	return 0;
}
153 
t7xx_port_wwan_md_state_notify(struct t7xx_port * port,unsigned int state)154 static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
155 {
156 	const struct t7xx_port_conf *port_conf = port->port_conf;
157 
158 	if (state != MD_STATE_READY)
159 		return;
160 
161 	if (!port->wwan_port) {
162 		port->wwan_port = wwan_create_port(port->dev, port_conf->port_type,
163 						   &wwan_ops, port);
164 		if (IS_ERR(port->wwan_port))
165 			dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
166 	}
167 }
168 
/* Port-proxy operations for WWAN-type ports; referenced by the port
 * configuration tables in t7xx_port_proxy.c (hence non-static).
 */
struct port_ops wwan_sub_port_ops = {
	.init = t7xx_port_wwan_init,
	.recv_skb = t7xx_port_wwan_recv_skb,
	.uninit = t7xx_port_wwan_uninit,
	.enable_chl = t7xx_port_wwan_enable_chl,
	.disable_chl = t7xx_port_wwan_disable_chl,
	.md_state_notify = t7xx_port_wwan_md_state_notify,
};
177