/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#ifndef __RTW_HCI_H__
#define __RTW_HCI_H__

/* ops for PCI, USB and SDIO */
struct rtw_hci_ops {
	int (*tx_write)(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb);
	void (*tx_kick_off)(struct rtw_dev *rtwdev);
	void (*flush_queues)(struct rtw_dev *rtwdev, u32 queues, bool drop);
	int (*setup)(struct rtw_dev *rtwdev);
	int (*start)(struct rtw_dev *rtwdev);
	void (*stop)(struct rtw_dev *rtwdev);
	void (*deep_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*link_ps)(struct rtw_dev *rtwdev, bool enter);
	void (*interface_cfg)(struct rtw_dev *rtwdev);

	int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
	int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);

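	/* raw register access, implemented by each bus driver */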
	u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
	u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
	u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
	void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
	void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
	void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
};

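/* inline wrappers that dispatch to the bus-specific ops in rtwdev->hci.ops */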
static inline int rtw_hci_tx_write(struct rtw_dev *rtwdev,
				   struct rtw_tx_pkt_info *pkt_info,
				   struct sk_buff *skb)
{
	return rtwdev->hci.ops->tx_write(rtwdev, pkt_info, skb);
}

static inline void rtw_hci_tx_kick_off(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->tx_kick_off(rtwdev);
}

static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->setup(rtwdev);
}

static inline int rtw_hci_start(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.ops->start(rtwdev);
}

static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->stop(rtwdev);
}

static inline void rtw_hci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	rtwdev->hci.ops->deep_ps(rtwdev, enter);
}

static inline void rtw_hci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	rtwdev->hci.ops->link_ps(rtwdev, enter);
}

static inline void rtw_hci_interface_cfg(struct rtw_dev *rtwdev)
{
	rtwdev->hci.ops->interface_cfg(rtwdev);
}

static inline int
rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
}

static inline int
rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
}

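/* direct register accessors, routed through the HCI ops */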
static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read8(rtwdev, addr);
}

static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read16(rtwdev, addr);
}

static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
{
	return rtwdev->hci.ops->read32(rtwdev, addr);
}

static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	rtwdev->hci.ops->write8(rtwdev, addr, val);
}

static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	rtwdev->hci.ops->write16(rtwdev, addr, val);
}

static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	rtwdev->hci.ops->write32(rtwdev, addr, val);
}

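/* read-modify-write helpers to set or clear bits in a register */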
static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	u8 val;

	val = rtw_read8(rtwdev, addr);
	rtw_write8(rtwdev, addr, val | bit);
}

static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	u16 val;

	val = rtw_read16(rtwdev, addr);
	rtw_write16(rtwdev, addr, val | bit);
}

static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	u32 val;

	val = rtw_read32(rtwdev, addr);
	rtw_write32(rtwdev, addr, val | bit);
}

static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
{
	u8 val;

	val = rtw_read8(rtwdev, addr);
	rtw_write8(rtwdev, addr, val & ~bit);
}

static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
{
	u16 val;

	val = rtw_read16(rtwdev, addr);
	rtw_write16(rtwdev, addr, val & ~bit);
}

static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
{
	u32 val;

	val = rtw_read32(rtwdev, addr);
	rtw_write32(rtwdev, addr, val & ~bit);
}

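/* RF register access, serialized by rtwdev->rf_lock */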
static inline u32
rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	    u32 addr, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&rtwdev->rf_lock, flags);
	val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
	spin_unlock_irqrestore(&rtwdev->rf_lock, flags);

	return val;
}

static inline void
rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
	     u32 addr, u32 mask, u32 data)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwdev->rf_lock, flags);
	rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
	spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
}

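/* masked accessors: read or write the register field selected by @mask */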
static inline u32
rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read32(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline u16
rtw_read16_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read16(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline u8
rtw_read8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 ret;

	orig = rtw_read8(rtwdev, addr);
	ret = (orig & mask) >> shift;

	return ret;
}

static inline void
rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
{
	u32 shift = __ffs(mask);
	u32 orig;
	u32 set;

	WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);

	orig = rtw_read32(rtwdev, addr);
	set = (orig & ~mask) | ((data << shift) & mask);
	rtw_write32(rtwdev, addr, set);
}

static inline void
rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
{
	u32 shift;
	u8 orig, set;

	mask &= 0xff;
	shift = __ffs(mask);

	orig = rtw_read8(rtwdev, addr);
	set = (orig & ~mask) | ((data << shift) & mask);
	rtw_write8(rtwdev, addr, set);
}

static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
{
	return rtwdev->hci.type;
}

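/* flush_queues is optional, so only call it when the bus driver provides it */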
static inline void rtw_hci_flush_queues(struct rtw_dev *rtwdev, u32 queues,
					bool drop)
{
	if (rtwdev->hci.ops->flush_queues)
		rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
}

static inline void rtw_hci_flush_all_queues(struct rtw_dev *rtwdev, bool drop)
{
	if (rtwdev->hci.ops->flush_queues)
		rtwdev->hci.ops->flush_queues(rtwdev,
					      BIT(rtwdev->hw->queues) - 1,
					      drop);
}

#endif