// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM	0
#define RT_ID_AP_PORT_ENUM	1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID	0x49434343

#define FEATURE_VER		GENMASK(7, 4)
#define FEATURE_MSK		GENMASK(3, 0)

#define RGU_RESET_DELAY_MS	10
#define PORT_RESET_DELAY_MS	2000
#define EX_HS_TIMEOUT_MS	5000
#define EX_HS_POLL_DELAY_MS	10

enum mtk_feature_support_type {
	MTK_FEATURE_DOES_NOT_EXIST,
	MTK_FEATURE_NOT_SUPPORTED,
	MTK_FEATURE_MUST_BE_SUPPORTED,
};

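/* Read the D2H MHCCIF SW interrupt status, masked to the sources handled by this driver. */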
static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Check the interrupt status and queue commands accordingly.
 *
 * Returns:
 ** 0		- Success.
 ** -EINVAL	- Failure to get FSM control.
 */
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;
	struct t7xx_fsm_ctl *ctl;
	unsigned int int_sta;
	int ret = 0;
	u32 mask;

	ctl = md->fsm_ctl;
	if (!ctl) {
		dev_err_ratelimited(&t7xx_dev->pdev->dev,
				    "MHCCIF interrupt received before initializing MD monitor\n");
		return -EINVAL;
	}

	spin_lock_bh(&md->exp_lock);
	int_sta = t7xx_get_interrupt_status(t7xx_dev);
	md->exp_id |= int_sta;
	if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
		if (ctl->md_state == MD_STATE_INVALID ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
		    ctl->md_state == MD_STATE_READY) {
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
		}
	} else if (md->exp_id & D2H_INT_PORT_ENUM) {
		md->exp_id &= ~D2H_INT_PORT_ENUM;

		if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
		    ctl->curr_state == FSM_STATE_STOPPED)
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
	} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
		mask = t7xx_mhccif_mask_get(t7xx_dev);
		if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			queue_work(md->handshake_wq, &md->handshake_work);
		}
	}
	spin_unlock_bh(&md->exp_lock);

	return ret;
}

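/*
 * Acknowledge the device-level reset interrupt: read TOPRGU_CH_PCIE_IRQ_STA
 * and write the value back to clear the asserted bits.
 */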
static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
	void __iomem *reset_pcie_reg;
	u32 val;

	reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
			 pbase_addr->pcie_dev_reg_trsl_addr;
	val = ioread32(reset_pcie_reg);
	iowrite32(val, reset_pcie_reg);
}

void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
	/* Clear L2 */
	t7xx_clr_device_irq_via_pcie(t7xx_dev);
	/* Clear L1 */
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

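/*
 * Evaluate the named ACPI method on the PCI device to reset the modem.
 * Returns -EFAULT if the ACPI handle or method is missing or evaluation fails;
 * a no-op returning 0 when the kernel is built without CONFIG_ACPI.
 */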
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = &t7xx_dev->pdev->dev;
	acpi_status acpi_ret;
	acpi_handle handle;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "ACPI handle not found\n");
		return -EFAULT;
	}

	if (!acpi_has_method(handle, fn_name)) {
		dev_err(dev, "%s method not found\n", fn_name);
		return -EFAULT;
	}

	acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
	if (ACPI_FAILURE(acpi_ret)) {
		dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
		return -EFAULT;
	}

	kfree(buffer.pointer);

#endif
	return 0;
}

int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_acpi_reset(t7xx_dev, "_RST");
}

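/*
 * Pick the reset flavour reported in T7XX_PCIE_MISC_DEV_STATUS: PLDR resets
 * via the "MRST._RST" ACPI method, FLDR via "_RST".
 */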
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
	u32 val;

	val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (val & MISC_RESET_TYPE_PLDR)
		t7xx_acpi_reset(t7xx_dev, "MRST._RST");
	else if (val & MISC_RESET_TYPE_FLDR)
		t7xx_acpi_fldr_func(t7xx_dev);
}

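/*
 * RGU interrupt handling: the hard IRQ handler below acks and gates the
 * interrupt, then wakes this thread, which waits RGU_RESET_DELAY_MS before
 * resetting the device through the PMIC/ACPI path.
 */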
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;

	msleep(RGU_RESET_DELAY_MS);
	t7xx_reset_device_via_pmic(t7xx_dev);
	return IRQ_HANDLED;
}

static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;
	struct t7xx_modem *modem;

	t7xx_clear_rgu_irq(t7xx_dev);
	if (!t7xx_dev->rgu_pci_irq_en)
		return IRQ_HANDLED;

	modem = t7xx_dev->md;
	modem->rgu_irq_asserted = true;
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
	/* Registers RGU callback ISR with PCIe driver */
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}

/**
 * t7xx_cldma_exception() - CLDMA exception handler.
 * @md_ctrl: modem control struct.
 * @stage: exception stage.
 *
 * Part of the modem exception recovery.
 * Stages are one after the other as described below:
 * HIF_EX_INIT:		Disable and clear TXQ.
 * HIF_EX_CLEARQ_DONE:	Disable RX, flush TX/RX workqueues and clear RX.
 * HIF_EX_ALLQ_RESET:	HW is back in safe mode for re-initialization and restart.
 */

/* Modem Exception Handshake Flow
 *
 * Modem HW Exception interrupt received
 *           (MD_IRQ_CCIF_EX)
 *                   |
 *         +---------v--------+
 *         |   HIF_EX_INIT    | : Disable and clear TXQ
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         | HIF_EX_INIT_DONE | : Wait for the init to be done
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
 *         +------------------+ : Flush TX/RX workqueues
 *                   |
 *         +---------v--------+
 *         |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
 *         +------------------+
 */
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
	switch (stage) {
	case HIF_EX_INIT:
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
		break;

	case HIF_EX_CLEARQ_DONE:
		/* We do not want to get CLDMA IRQ when MD is
		 * resetting CLDMA after it got clearq_ack.
		 */
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
		t7xx_cldma_stop(md_ctrl);

		if (md_ctrl->hif_id == CLDMA_ID_MD)
			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
		break;

	case HIF_EX_ALLQ_RESET:
		t7xx_cldma_hw_init(&md_ctrl->hw_info);
		t7xx_cldma_start(md_ctrl);
		break;

	default:
		break;
	}
}

static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

	if (stage == HIF_EX_CLEARQ_DONE) {
		/* Give DHL time to flush data */
		msleep(PORT_RESET_DELAY_MS);
		t7xx_port_proxy_reset(md->port_prox);
	}

	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);

	if (stage == HIF_EX_INIT)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
	else if (stage == HIF_EX_CLEARQ_DONE)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

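/*
 * Poll md->exp_id for the requested exception-handshake event every
 * EX_HS_POLL_DELAY_MS, giving up after EX_HS_TIMEOUT_MS with -EFAULT.
 */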
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
	unsigned int waited_time_ms = 0;

	do {
		if (md->exp_id & event_id)
			return 0;

		waited_time_ms += EX_HS_POLL_DELAY_MS;
		msleep(EX_HS_POLL_DELAY_MS);
	} while (waited_time_ms < EX_HS_TIMEOUT_MS);

	return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
	/* Register the MHCCIF ISR for MD exception, port enum and
	 * async handshake notifications.
	 */
	t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
	t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

	/* Register RGU IRQ handler for sAP exception notification */
	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_register_rgu_isr(t7xx_dev);
}

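/*
 * Feature query frame exchanged during handshake: the feature set bracketed
 * by the MD_FEATURE_QUERY_ID ("ICCC") head and tail patterns.
 */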
struct feature_query {
	__le32 head_pattern;
	u8 feature_set[FEATURE_COUNT];
	__le32 tail_pattern;
};

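/* Build and send the HS1 control message carrying the host feature query. */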
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
	struct feature_query *ft_query;
	struct sk_buff *skb;

	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
	if (!skb)
		return;

	ft_query = skb_put(skb, sizeof(*ft_query));
	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

	/* Send HS1 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

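/*
 * Validate the feature query received from the device and reply with the HS3
 * runtime-data message: one mtk_runtime_feature entry for every feature that
 * is not flagged MTK_FEATURE_MUST_BE_SUPPORTED.
 */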
static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
				       void *data)
{
	struct feature_query *md_feature = data;
	struct mtk_runtime_feature *rt_feature;
	unsigned int i, rt_data_len = 0;
	struct sk_buff *skb;

	/* Parse MD runtime data query */
	if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
	    le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
		dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
			le32_to_cpu(md_feature->head_pattern),
			le32_to_cpu(md_feature->tail_pattern));
		return -EINVAL;
	}

	for (i = 0; i < FEATURE_COUNT; i++) {
		if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
		    MTK_FEATURE_MUST_BE_SUPPORTED)
			rt_data_len += sizeof(*rt_feature);
	}

	skb = t7xx_ctrl_alloc_skb(rt_data_len);
	if (!skb)
		return -ENOMEM;

	rt_feature = skb_put(skb, rt_data_len);
	memset(rt_feature, 0, rt_data_len);

	/* Fill runtime feature */
	for (i = 0; i < FEATURE_COUNT; i++) {
		u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

		if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		rt_feature->feature_id = i;
		if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
			rt_feature->support_info = md_feature->feature_set[i];

		rt_feature++;
	}

	/* Send HS3 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
	return 0;
}

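/*
 * Walk the runtime features that follow the feature query in the HS2 payload.
 * Features marked MTK_FEATURE_MUST_BE_SUPPORTED on the host side must be
 * confirmed by the device; port-enumeration entries are handed to the port layer.
 */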
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
				   struct device *dev, void *data, int data_length)
{
	enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
	struct mtk_runtime_feature *rt_feature;
	int i, offset;

	offset = sizeof(struct feature_query);
	for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
		rt_feature = data + offset;
		offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);

		ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
		if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
			return -EINVAL;

		if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
	}

	return 0;
}

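/*
 * Mark the modem core as not ready and, if a handshake is still in flight,
 * queue an HS2 exit event so the handshake worker can bail out cleanly.
 */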
static int t7xx_core_reset(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	md->core_md.ready = false;

	if (!ctl) {
		dev_err(dev, "FSM is not initialized\n");
		return -EINVAL;
	}

	if (md->core_md.handshake_ongoing) {
		int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		if (ret)
			return ret;
	}

	md->core_md.handshake_ongoing = false;
	return 0;
}

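/*
 * Core handshake: send the HS1 feature query, wait for the HS2 event (or an
 * error/exit event), parse the device runtime data, answer with HS3 and mark
 * the core ready. Any failure or pending exception just frees the event.
 */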
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
				 struct t7xx_fsm_ctl *ctl,
				 enum t7xx_fsm_event_state event_id,
				 enum t7xx_fsm_event_state err_detect)
{
	struct t7xx_fsm_event *event = NULL, *event_next;
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned long flags;
	int ret;

	t7xx_prepare_host_rt_data_query(core_info);

	while (!kthread_should_stop()) {
		bool event_received = false;

		spin_lock_irqsave(&ctl->event_lock, flags);
		list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
			if (event->event_id == err_detect) {
				list_del(&event->entry);
				spin_unlock_irqrestore(&ctl->event_lock, flags);
				dev_err(dev, "Core handshake error event received\n");
				goto err_free_event;
			} else if (event->event_id == event_id) {
				list_del(&event->entry);
				event_received = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (event_received)
			break;

		wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			goto err_free_event;
	}

	if (!event || ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
	if (ret) {
		dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	if (ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
	if (ret) {
		dev_err(dev, "Device failure parsing runtime data: %d", ret);
		goto err_free_event;
	}

	core_info->ready = true;
	core_info->handshake_ongoing = false;
	wake_up(&ctl->async_hk_wq);
err_free_event:
	kfree(event);
}

static void t7xx_md_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in core_reset() */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

static void t7xx_ap_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
	t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
	md->core_ap.handshake_ongoing = true;
	t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}

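/*
 * React to FSM lifecycle events: unmask the handshake interrupts before start,
 * re-check any interrupts that raced with FSM_START (queuing handshake work or
 * flagging an exception), and mask the async handshake sources once ready.
 */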
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned int int_sta;
	unsigned long flags;

	switch (evt_id) {
	case FSM_PRE_START:
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
						   D2H_INT_ASYNC_AP_HK);
		break;

	case FSM_START:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

		spin_lock_irqsave(&md->exp_lock, flags);
		int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
		md->exp_id |= int_sta;
		if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
			ctl->exp_flg = true;
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else if (ctl->exp_flg) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
		} else {
			void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;

			if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
				queue_work(md->handshake_wq, &md->handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
				iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
			}

			if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
				queue_work(md->handshake_wq, &md->ap_handshake_work);
				md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
				iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
				t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
			}
		}
		spin_unlock_irqrestore(&md->exp_lock, flags);

		t7xx_mhccif_mask_clr(md->t7xx_dev,
				     D2H_INT_EXCEPTION_INIT |
				     D2H_INT_EXCEPTION_INIT_DONE |
				     D2H_INT_EXCEPTION_CLEARQ_DONE |
				     D2H_INT_EXCEPTION_ALLQ_RESET);
		break;

	case FSM_READY:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
		break;

	default:
		break;
	}
}

void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	int ret;

	t7xx_md_exception(md, HIF_EX_INIT);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

	t7xx_md_exception(md, HIF_EX_INIT_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

	t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

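/*
 * Allocate the modem control block, create the handshake workqueue and flag
 * the port-enumeration features of both cores as must-be-supported.
 */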
static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_modem *md;

	md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	md->t7xx_dev = t7xx_dev;
	t7xx_dev->md = md;
	spin_lock_init(&md->exp_lock);
	md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
					   0, "md_hk_wq");
	if (!md->handshake_wq)
		return NULL;

	INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);

	return md;
}

int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	md->md_init_finish = false;
	md->exp_id = 0;
	t7xx_fsm_reset(md);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_port_proxy_reset(md->port_prox);
	md->md_init_finish = true;
	return t7xx_core_reset(md);
}

/**
 * t7xx_md_init() - Initialize modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate and initialize MD control block, and initialize data path.
 * Register MHCCIF ISR and RGU ISR, and start the state machine.
 *
 * Return:
 ** 0		- Success.
 ** -ENOMEM	- Allocation failure.
 */
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md;
	int ret;

	md = t7xx_md_alloc(t7xx_dev);
	if (!md)
		return -ENOMEM;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_fsm_init(md);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_ccmni_init(t7xx_dev);
	if (ret)
		goto err_uninit_fsm;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
	if (ret)
		goto err_uninit_ccmni;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
	if (ret)
		goto err_uninit_md_cldma;

	ret = t7xx_port_proxy_init(md);
	if (ret)
		goto err_uninit_ap_cldma;

	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
	if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
		goto err_uninit_proxy;

	t7xx_md_sys_sw_init(t7xx_dev);
	md->md_init_finish = true;
	return 0;

err_uninit_proxy:
	t7xx_port_proxy_uninit(md->port_prox);

err_uninit_ap_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);

err_uninit_md_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
	t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
	t7xx_fsm_uninit(md);

err_destroy_hswq:
	destroy_workqueue(md->handshake_wq);
	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
	return ret;
}

void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

	if (!md->md_init_finish)
		return;

	t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
	t7xx_port_proxy_uninit(md->port_prox);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_ccmni_exit(t7xx_dev);
	t7xx_fsm_uninit(md);
	destroy_workqueue(md->handshake_wq);
}