/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"

/********/
/* Send */
/********/

static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
						  int force)
{
	/*
	 * Check whether HIF has any prior sends that have finished
	 * but have not yet had their post-processing done.
	 */
	ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
}

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
					    struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}

/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
	struct ath10k *ar = ep->htc->ar;

	if (!ep->tx_credit_flow_enabled)
		return false;
	if (ep->tx_credits >= ep->tx_credits_per_max_message)
		return false;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
		   ep->eid);
	return true;
}

static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;

	if (ath10k_htc_ep_need_credit_update(ep))
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_unlock_bh(&ep->htc->tx_lock);
}
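
/*
 * Note: after ath10k_htc_prepare_tx_skb() the buffer handed to HIF looks
 * roughly like this (a sketch based on the fields filled in above; the
 * authoritative struct ath10k_htc_hdr layout lives in htc.h):
 *
 *   [ ath10k_htc_hdr: eid | flags | len (payload only, LE) | seq_no ]
 *   [ payload: len bytes                                            ]
 *
 * hdr->len deliberately excludes the header itself, which is why it is set
 * to skb->len - sizeof(*hdr).
 */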

int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	if (ep->tx_credit_flow_enabled) {
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
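		/*
		 * Worked example (hypothetical numbers): with a target
		 * credit size of 1792 bytes, a 96-byte WMI command costs
		 * DIV_ROUND_UP(96, 1792) = 1 credit, while a 2000-byte
		 * frame would cost 2 credits.
		 */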
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, skb_cb->paddr);
	if (ret) {
		ret = -EIO;
		goto err_credits;
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}

static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb,
					    unsigned int eid)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];

	if (WARN_ON_ONCE(!skb))
		return 0;

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */

	return 0;
}

/***********/
/* Receive */
/***********/

static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

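		/*
		 * Drop tx_lock across the callback: ep_tx_credits() may try
		 * to queue more frames, and ath10k_htc_send() takes tx_lock
		 * itself, so calling it with the lock held could deadlock.
		 */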
		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}

static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
				      u8 *buffer,
				      int length,
				      enum ath10k_htc_ep_id src_eid)
{
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report record too short\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
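
/*
 * Rough shape of an HTC trailer as parsed above (a sketch, not the
 * authoritative definition; see struct ath10k_htc_record in htc.h):
 *
 *   [ record hdr: id | len ][ record payload: len bytes ] ... repeated
 *
 * For ATH10K_HTC_RECORD_CREDITS the payload is an array of
 * struct ath10k_htc_credit_report entries, each naming an endpoint and
 * the number of credits returned to it.
 */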

static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb,
					    u8 pipe_id)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	ep = &htc->endpoint[eid];

	/*
	 * If this endpoint that received a message from the target has
	 * a to-target HIF pipe whose send completions are polled rather
	 * than interrupt-driven, this is a good point to ask HIF to check
	 * whether it has any completed sends to handle.
	 */
	if (ep->ul_is_polled)
		ath10k_htc_send_complete_check(ep, 1);

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			status = -EPROTO;
			goto out;
		}

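		/*
		 * The trailer occupies the last trailer_len bytes of the
		 * payload, i.e. it starts at hdr + sizeof(*hdr) +
		 * payload_len - trailer_len, which is what the pointer
		 * arithmetic below computes.
		 */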
		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		default:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, the target should
				 * not be sending unsolicited messages on ep 0
				 */
				ath10k_warn(ar, "HTC rx ctrl still processing\n");
				status = -EINVAL;
				complete(&htc->ctl_resp);
				goto out;
			}

			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
		}
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);

	return status;
}

static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	/* This is unexpected. FW is not supposed to send regular rx on this
	 * endpoint. */
	ath10k_warn(ar, "unexpected htc rx\n");
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
	}
}

static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
	struct ath10k_htc_svc_tx_credits *entry;

	entry = &htc->service_tx_alloc[0];

	/*
	 * For PCIe, allocate all credits/HTC buffers to WMI.
	 * No buffers are used/required for data; data always
	 * remains on the host.
	 */
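	/* The first entry is skipped (left zeroed); the next one hands every
	 * available transmit credit to the WMI control service.
	 */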
	entry++;
	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
	entry->credit_allocation = htc->total_transmit_credits;
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;
	int i;

	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->service_tx_alloc[i].service_id == service_id)
			allocation =
			    htc->service_tx_alloc[i].credit_allocation;
	}

	return allocation;
}

int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_msg *msg;
	u16 message_id;
	u16 credit_count;
	u16 credit_size;

	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (status == 0) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive an interrupt for the control response message
		 * even if the buffer was completed. It is suspected that
		 * iomap writes unmasking PCI CE irqs aren't always
		 * propagated properly in KVM PCI-passthrough.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling...\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		status = wait_for_completion_timeout(&htc->ctl_resp,
						     ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (status == 0)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len: %d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	credit_count = __le16_to_cpu(msg->ready.credit_count);
	credit_size = __le16_to_cpu(msg->ready.credit_size);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size: %d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	ath10k_htc_setup_target_buffer_assignments(htc);

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	return 0;
}

int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err(ar, "Service connect timeout: %d\n", status);
		return status;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x\n", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;
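
	/*
	 * e.g. (hypothetical numbers) with max_ep_message_len = 4096 and
	 * target_credit_size = 1792: 4096 / 1792 = 2, and since
	 * 4096 % 1792 != 0 the result is bumped to 3 credits per
	 * maximum-sized message.
	 */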

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id,
						&ep->ul_is_polled,
						&ep->dl_is_polled);
	if (status)
		return status;

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc ep %d ul polled %d dl polled %d\n",
		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}

struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}
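
/*
 * Typical caller pattern (a sketch with hypothetical names; not taken
 * verbatim from any ath10k user):
 *
 *	skb = ath10k_htc_alloc_skb(ar, len);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), payload, len);
 *	ret = ath10k_htc_send(&ar->htc, eid, skb);
 *
 * The headroom reserved above is what allows ath10k_htc_send() to
 * skb_push() the HTC header in front of the payload without reallocating.
 */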

int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	return 0;
}

/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
	struct ath10k_hif_cb htc_callbacks;
	struct ath10k_htc_ep *ep = NULL;
	struct ath10k_htc *htc = &ar->htc;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	/* setup HIF layer callbacks */
	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
	htc->ar = ar;

	/* Get HIF default pipe for HTC message exchange */
	ep = &htc->endpoint[ATH10K_HTC_EP_0];

	ath10k_hif_set_callbacks(ar, &htc_callbacks);
	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);

	init_completion(&htc->ctl_resp);

	return 0;
}
