// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS		200
#define FSM_EVENT_POLL_INTERVAL_MS		20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS		10000
#define FSM_MD_EX_PASS_TIMEOUT_MS		45000
#define FSM_CMD_TIMEOUT_MS			2000

void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

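/* Invoke each registered notifier callback; the notifier lock is dropped around each call. */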
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
	}

	kfree(cmd);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

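/*
 * Poll the head of the event queue for @event_expected, discarding
 * @event_ignore events, for at most @retries polling intervals.
 */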
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	ctl->curr_state = FSM_STATE_STOPPED;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_ctrl *md_ctrl;
	int err;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	t7xx_dev = ctl->md->t7xx_dev;

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	if (!ctl->md->rgu_irq_asserted) {
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
		/* Wait for the DRM disable to take effect */
		msleep(FSM_DRM_DISABLE_DELAY_MS);

		err = t7xx_acpi_fldr_func(t7xx_dev);
		if (err)
			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_md_event_notify(md, FSM_READY);
}

static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq, md->core_md.ready || ctl->exp_flg,
					 HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	u32 dev_status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = read_poll_timeout(ioread32, dev_status,
				(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
				false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (ret) {
		struct device *dev = &md->t7xx_dev->pdev->dev;

		fsm_finish_command(ctl, cmd, -ETIMEDOUT);
		dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
		return;
	}

	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
	fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}

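/* FSM worker thread: dequeue queued commands and dispatch them to the routine handlers. */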
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

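/*
 * Queue a command for the FSM thread. With FSM_CMD_FLAG_WAIT_FOR_COMPLETION
 * set, block until the command completes or FSM_CMD_TIMEOUT_MS expires.
 */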
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
	}

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		return ret;
	}

	return 0;
}

int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

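/* Translate a modem interrupt into the corresponding FSM command. */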
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
}

int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}