// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"

/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
        if (chnl)
                return chnl->ctype == IPC_CTYPE_WWAN &&
                       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
        return false;
}

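/* Send a device sleep message to CP and store the requested sleep state. */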
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
        union ipc_msg_prep_args prep_args = {
                .sleep.target = 1,
                .sleep.state = state,
        };

        ipc_imem->device_sleep = state;

        return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
                                        IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

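/* Prepare one new DL transfer descriptor unless the pipe queue is full. */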
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
                                  struct ipc_pipe *pipe)
{
        /* limit max. nr of entries */
        if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
                return false;

        return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* This timer handler retries the DL buffer allocation if a pipe has no free
 * buffers and rings the doorbell if new TDs became available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
                                      void *msg, size_t size)
{
        bool new_buffers_available = false;
        bool retry_allocation = false;
        int i;

        for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
                struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

                if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
                        continue;

                while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
                        new_buffers_available = true;

                if (pipe->nr_of_queued_entries == 0)
                        retry_allocation = true;
        }

        if (new_buffers_available)
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_DL_PROCESS);

        if (retry_allocation) {
                ipc_imem->hrtimer_period =
                        ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
                if (!hrtimer_active(&ipc_imem->td_alloc_timer))
                        hrtimer_start(&ipc_imem->td_alloc_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }
        return 0;
}

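/* TD allocation timer expiry: defer the allocation retry to tasklet context. */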
static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, td_alloc_timer);
        /* Post an async tasklet event to trigger HP update Doorbell */
        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
                                 0, false);
        return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                            void *msg, size_t size)
{
        ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                      IPC_HP_FAST_TD_UPD_TMR);

        return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, fast_update_timer);
        /* Post an async tasklet event to trigger HP update Doorbell */
        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
                                 NULL, 0, false);
        return HRTIMER_NORESTART;
}

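/* ADB timer tasklet handler: finish the pending aggregated data block. */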
static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                    void *msg, size_t size)
{
        ipc_mux_ul_adb_finish(ipc_imem->mux);
        return 0;
}

static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, adb_timer);

        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
                                 NULL, 0, false);
        return HRTIMER_NORESTART;
}

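/* Read the CP capabilities from MMIO and derive the MUX configuration. */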
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
                                          struct ipc_mux_config *cfg)
{
        ipc_mmio_update_cp_capability(ipc_imem->mmio);

        if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
                dev_err(ipc_imem->dev, "Failed to get Mux capability.");
                return -EINVAL;
        }

        cfg->protocol = ipc_imem->mmio->mux_protocol;

        cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
                               MUX_UL_ON_CREDITS :
                               MUX_UL;

        /* The instance ID is the same as the channel ID because it is
         * reused by the channel allocation function.
         */
        cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

        return 0;
}

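/* Send the feature-set message to CP from atomic or process context. */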
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
                                   unsigned int reset_enable, bool atomic_ctx)
{
        union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
                                                      reset_enable };

        if (atomic_ctx)
                ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
                                         IPC_MSG_PREP_FEATURE_SET, &prep_args,
                                         NULL);
        else
                ipc_protocol_msg_send(ipc_imem->ipc_protocol,
                                      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

/**
 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 * @ipc_imem: Pointer to imem data-struct
 */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
        /* Use the TD update timer only in the runtime phase */
        if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
                /* trigger the doorbell irq on CP directly. */
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_TD_UPD_TMR_START);
                return;
        }

        if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
                ipc_imem->hrtimer_period =
                        ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
                if (!hrtimer_active(&ipc_imem->tdupdate_timer))
                        hrtimer_start(&ipc_imem->tdupdate_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }
}

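/* Cancel the given hrtimer if it is currently armed. */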
void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
        if (hrtimer_active(hr_timer))
                hrtimer_cancel(hr_timer);
}

/**
 * ipc_imem_adb_timer_start - Starts the ADB timer if not already running.
 * @ipc_imem: Pointer to imem data-struct
 */
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
        if (!hrtimer_active(&ipc_imem->adb_timer)) {
                ipc_imem->hrtimer_period =
                        ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
                hrtimer_start(&ipc_imem->adb_timer,
                              ipc_imem->hrtimer_period,
                              HRTIMER_MODE_REL);
        }
}

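/* Pass the accumulated UL skbs of all active channels to the protocol layer;
 * control channels trigger an immediate HP doorbell, while a pending update
 * for IP data is reported back to the caller.
 */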
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
        struct ipc_mem_channel *channel;
        bool hpda_ctrl_pending = false;
        struct sk_buff_head *ul_list;
        bool hpda_pending = false;
        struct ipc_pipe *pipe;
        int i;

        /* Analyze the uplink pipe of all active channels. */
        for (i = 0; i < ipc_imem->nr_of_channels; i++) {
                channel = &ipc_imem->channels[i];

                if (channel->state != IMEM_CHANNEL_ACTIVE)
                        continue;

                pipe = &channel->ul_pipe;

                /* Get the reference to the skbuf accumulator list. */
                ul_list = &channel->ul_list;

                /* Fill the transfer descriptor with the uplink buffer info. */
                if (!ipc_imem_check_wwan_ips(channel)) {
                        hpda_ctrl_pending |=
                                ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
                                                        pipe, ul_list);
                } else {
                        hpda_pending |=
                                ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
                                                        pipe, ul_list);
                }
        }

        /* forced HP update needed for non data channels */
        if (hpda_ctrl_pending) {
                hpda_pending = false;
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_UL_WRITE_TD);
        }

        return hpda_pending;
}

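/* Drive CP through IPC_INIT to IPC_RUNNING via the doorbell and poll the
 * MMIO IPC state until the boot timeout expires.
 */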
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
        int timeout = IPC_MODEM_BOOT_TIMEOUT;

        ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

        /* Trigger the CP interrupt to enter the init state. */
        ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                          IPC_MEM_DEVICE_IPC_INIT);
        /* Wait for the CP update. */
        do {
                if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
                    ipc_imem->ipc_requested_state) {
                        /* Prepare the MMIO space */
                        ipc_mmio_config(ipc_imem->mmio);

                        /* Trigger the CP irq to enter the running state. */
                        ipc_imem->ipc_requested_state =
                                IPC_MEM_DEVICE_IPC_RUNNING;
                        ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                          IPC_MEM_DEVICE_IPC_RUNNING);

                        return;
                }
                msleep(20);
        } while (--timeout);

        /* timeout */
        dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
                ipc_imem_phase_get_string(ipc_imem->phase),
                ipc_mmio_get_ipc_state(ipc_imem->mmio));

        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
                                    struct ipc_pipe *pipe, struct sk_buff *skb)
{
        u16 port_id;

        if (!skb)
                return;

        /* An AT/control or IP packet is expected. */
        switch (pipe->channel->ctype) {
        case IPC_CTYPE_CTRL:
                port_id = pipe->channel->channel_id;
                ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
                                    IPC_CB(skb)->mapping,
                                    IPC_CB(skb)->direction);
                if (port_id == IPC_MEM_CTRL_CHL_ID_7)
                        ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
                                                       skb);
                else if (ipc_is_trace_channel(ipc_imem, port_id))
                        ipc_trace_port_rx(ipc_imem, skb);
                else
                        wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
                                     skb);
                break;

        case IPC_CTYPE_WWAN:
                if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
                        ipc_mux_dl_decode(ipc_imem->mux, skb);
                break;
        default:
                dev_err(ipc_imem->dev, "Invalid channel type");
                break;
        }
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
                                     struct ipc_pipe *pipe)
{
        s32 cnt = 0, processed_td_cnt = 0;
        struct ipc_mem_channel *channel;
        u32 head = 0, tail = 0;
        bool processed = false;
        struct sk_buff *skb;

        channel = pipe->channel;

        ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
                                         &tail);
        if (pipe->old_tail != tail) {
                if (pipe->old_tail < tail)
                        cnt = tail - pipe->old_tail;
                else
                        cnt = pipe->nr_of_entries - pipe->old_tail + tail;
        }

        processed_td_cnt = cnt;

        /* Seek for pipes with pending DL data. */
        while (cnt--) {
                skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

                /* Analyze the packet type and distribute it. */
                ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
        }

        /* Try to allocate new empty DL SKbs from head..tail - 1 */
        while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
                processed = true;

        if (processed && !ipc_imem_check_wwan_ips(channel)) {
                /* Force HP update for non IP channels */
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_DL_PROCESS);
                processed = false;

                /* If Fast Update timer is already running then stop */
                ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
        }

        /* Any control channel process will get immediate HP update.
         * Start Fast update timer only for IP channel if all the TDs were
         * used in last process.
         */
        if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
                ipc_imem->hrtimer_period =
                        ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
                hrtimer_start(&ipc_imem->fast_update_timer,
                              ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
        }

        if (ipc_imem->app_notify_dl_pend)
                complete(&ipc_imem->dl_pend_sem);
}

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
                                     struct ipc_pipe *pipe)
{
        struct ipc_mem_channel *channel;
        u32 tail = 0, head = 0;
        struct sk_buff *skb;
        s32 cnt = 0;

        channel = pipe->channel;

        /* Get the current head and tail index of the pipe. */
        ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
                                         &tail);

        if (pipe->old_tail != tail) {
                if (pipe->old_tail < tail)
                        cnt = tail - pipe->old_tail;
                else
                        cnt = pipe->nr_of_entries - pipe->old_tail + tail;
        }

        /* Free UL buffers. */
        while (cnt--) {
                skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

                if (!skb)
                        continue;

                /* If the user app was suspended in uplink direction - blocking
                 * write, resume it.
                 */
                if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
                        complete(&channel->ul_sem);

                /* Free the skbuf element. */
                if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
                        if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
                                ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
                        else
                                dev_err(ipc_imem->dev,
                                        "OP Type is UL_MUX, unknown if_id %d",
                                        channel->if_id);
                } else {
                        ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
                }
        }

        /* Trace channel stats for IP UL pipe. */
        if (ipc_imem_check_wwan_ips(pipe->channel))
                ipc_mux_check_n_restart_tx(ipc_imem->mux);

        if (ipc_imem->app_notify_ul_pend)
                complete(&ipc_imem->ul_pend_sem);
}

/* Handle the ROM irq: latch the ROM exit code and wake up the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
        struct ipc_mem_channel *channel;

        channel = ipc_imem->ipc_devlink->devlink_sio.channel;
        ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
        complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                          void *msg, size_t size)
{
        ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                      IPC_HP_TD_UPD_TMR);
        return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
        /* The link will go down; test for pending UL packets. */
        if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
            hrtimer_active(&ipc_imem->tdupdate_timer)) {
                /* Generate the doorbell irq. */
                ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
                /* Stop the TD update timer. */
                ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
                /* Stop the fast update timer. */
                ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
        }
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
                                        void *msg, size_t size)
{
        /* Update & check the current operation phase. */
        if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
                return -EIO;

        if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
            IPC_MEM_DEVICE_IPC_UNINIT) {
                ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

                ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                  IPC_MEM_DEVICE_IPC_INIT);

                ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
                /* reduce period to 100 ms to check for mmio init state */
                if (!hrtimer_active(&ipc_imem->startup_timer))
                        hrtimer_start(&ipc_imem->startup_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        } else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
                   IPC_MEM_DEVICE_IPC_INIT) {
                /* Startup complete - disable timer */
                ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

                /* Prepare the MMIO space */
                ipc_mmio_config(ipc_imem->mmio);
                ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
                ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                  IPC_MEM_DEVICE_IPC_RUNNING);
        }

        return 0;
}

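/* Startup timer expiry: re-arm the timer if a period is set and defer the
 * state handling to the tasklet context.
 */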
static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
        enum hrtimer_restart result = HRTIMER_NORESTART;
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, startup_timer);

        if (ktime_to_ns(ipc_imem->hrtimer_period)) {
                hrtimer_forward_now(&ipc_imem->startup_timer,
                                    ipc_imem->hrtimer_period);
                result = HRTIMER_RESTART;
        }

        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
                                 NULL, 0, false);
        return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
        return (ipc_imem->phase == IPC_P_RUN &&
                ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
                       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
                       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
                                    void *msg, size_t size)
{
        enum ipc_mem_exec_stage exec_stage =
                ipc_imem_get_exec_stage_buffered(ipc_imem);

        if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
                ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

        return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
        struct ipc_chnl_cfg chnl_cfg_port = { 0 };
        struct ipc_mux_config mux_cfg;
        struct iosm_imem *ipc_imem;
        u8 ctrl_chl_idx = 0;
        int ret;

        ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

        if (ipc_imem->phase != IPC_P_RUN) {
                dev_err(ipc_imem->dev,
                        "Modem link down. Exit run state worker.");
                goto err_out;
        }

        if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);

        ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
        if (ret < 0)
                goto err_out;

        ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
        if (!ipc_imem->mux)
                goto err_out;

        ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
        if (ret < 0)
                goto err_ipc_mux_deinit;

        ipc_imem->mux->wwan = ipc_imem->wwan;

        while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
                if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
                        ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
                        if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
                            chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
                                ctrl_chl_idx++;
                                continue;
                        }
                        if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
                                ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
                                                      chnl_cfg_port,
                                                      IRQ_MOD_OFF);
                                ipc_imem->ipc_port[ctrl_chl_idx] =
                                        ipc_port_init(ipc_imem, chnl_cfg_port);
                        }
                }
                ctrl_chl_idx++;
        }

        ipc_debugfs_init(ipc_imem);

        ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
                                 false);

        /* Complete all memory stores before setting bit */
        smp_mb__before_atomic();

        set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

        /* Complete all memory stores after setting bit */
        smp_mb__after_atomic();

        return;

err_ipc_mux_deinit:
        ipc_mux_deinit(ipc_imem->mux);
err_out:
        ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
}

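/* Central MSI handler: update the AP phase, process the message ring and all
 * open pipes, and re-arm the UL timers while data is still pending.
 */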
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
        enum ipc_mem_device_ipc_state curr_ipc_status;
        enum ipc_phase old_phase, phase;
        bool retry_allocation = false;
        bool ul_pending = false;
        int i;

        if (irq != IMEM_IRQ_DONT_CARE)
                ipc_imem->ev_irq_pending[irq] = false;

        /* Get the internal phase. */
        old_phase = ipc_imem->phase;

        if (old_phase == IPC_P_OFF_REQ) {
                dev_dbg(ipc_imem->dev,
                        "[%s]: Ignoring MSI. Deinit sequence in progress!",
                        ipc_imem_phase_get_string(old_phase));
                return;
        }

        /* Update the phase controlled by CP. */
        phase = ipc_imem_phase_update(ipc_imem);

        switch (phase) {
        case IPC_P_RUN:
                if (!ipc_imem->enter_runtime) {
                        /* Execute the transition from flash/boot to runtime. */
                        ipc_imem->enter_runtime = 1;

                        /* allow device to sleep, default value is
                         * IPC_HOST_SLEEP_ENTER_SLEEP
                         */
                        ipc_imem_msg_send_device_sleep(ipc_imem,
                                                       ipc_imem->device_sleep);

                        ipc_imem_msg_send_feature_set(ipc_imem,
                                                      IPC_MEM_INBAND_CRASH_SIG,
                                                      true);
                }

                curr_ipc_status =
                        ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

                /* check ipc_status change */
                if (ipc_imem->ipc_status != curr_ipc_status) {
                        ipc_imem->ipc_status = curr_ipc_status;

                        if (ipc_imem->ipc_status ==
                            IPC_MEM_DEVICE_IPC_RUNNING) {
                                schedule_work(&ipc_imem->run_state_worker);
                        }
                }

                /* Consider power management in the runtime phase. */
                ipc_imem_slp_control_exec(ipc_imem);
                break; /* Continue with skbuf processing. */

                /* Unexpected phases. */
        case IPC_P_OFF:
        case IPC_P_OFF_REQ:
                dev_err(ipc_imem->dev, "confused phase %s",
                        ipc_imem_phase_get_string(phase));
                return;

        case IPC_P_PSI:
                if (old_phase != IPC_P_ROM)
                        break;

                fallthrough;
                /* On CP the PSI phase is already active. */

        case IPC_P_ROM:
                /* Before CP ROM driver starts the PSI image, it sets
                 * the exit_code field on the doorbell scratchpad and
                 * triggers the irq.
                 */
                ipc_imem_rom_irq_exec(ipc_imem);
                return;

        default:
                break;
        }

        /* process message ring */
        ipc_protocol_msg_process(ipc_imem, irq);

        /* process all open pipes */
        for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
                struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
                struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

                if (dl_pipe->is_open &&
                    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
                        ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

                        if (dl_pipe->nr_of_queued_entries == 0)
                                retry_allocation = true;
                }

                if (ul_pipe->is_open)
                        ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
        }

        /* Try to generate new ADB or ADGH. */
        if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
                ipc_imem_td_update_timer_start(ipc_imem);
                if (ipc_imem->mux->protocol == MUX_AGGREGATION)
                        ipc_imem_adb_timer_start(ipc_imem);
        }

        /* Continue the send procedure with accumulated SIO or NETIF packets.
         * Reset the debounce flags.
         */
        ul_pending |= ipc_imem_ul_write_td(ipc_imem);

        /* if UL data is pending restart TD update timer */
        if (ul_pending) {
                ipc_imem->hrtimer_period =
                        ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
                if (!hrtimer_active(&ipc_imem->tdupdate_timer))
                        hrtimer_start(&ipc_imem->tdupdate_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }

        /* If CP has executed the transition
         * from IPC_INIT to IPC_RUNNING in the PSI
         * phase, wake up the flash app to open the pipes.
         */
        if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
            ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
            ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
            IPC_MEM_DEVICE_IPC_RUNNING) {
                complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
        }

        /* Reset the expected CP state. */
        ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

        if (retry_allocation) {
                ipc_imem->hrtimer_period =
                        ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
                if (!hrtimer_active(&ipc_imem->td_alloc_timer))
                        hrtimer_start(&ipc_imem->td_alloc_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
        }
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
                              size_t size)
{
        ipc_imem_handle_irq(ipc_imem, arg);

        return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
        /* start doorbell irq delay timer if UL is pending */
        if (ipc_imem_ul_write_td(ipc_imem))
                ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
                                                  enum ipc_mem_exec_stage stage)
{
        switch (stage) {
        case IPC_MEM_EXEC_STAGE_BOOT:
                if (ipc_imem->phase != IPC_P_ROM) {
                        /* Send this event only once */
                        ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
                }

                ipc_imem->phase = IPC_P_ROM;
                break;

        case IPC_MEM_EXEC_STAGE_PSI:
                ipc_imem->phase = IPC_P_PSI;
                break;

        case IPC_MEM_EXEC_STAGE_EBL:
                ipc_imem->phase = IPC_P_EBL;
                break;

        case IPC_MEM_EXEC_STAGE_RUN:
                if (ipc_imem->phase != IPC_P_RUN &&
                    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
                        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
                }
                ipc_imem->phase = IPC_P_RUN;
                break;

        case IPC_MEM_EXEC_STAGE_CRASH:
                if (ipc_imem->phase != IPC_P_CRASH)
                        ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

                ipc_imem->phase = IPC_P_CRASH;
                break;

        case IPC_MEM_EXEC_STAGE_CD_READY:
                if (ipc_imem->phase != IPC_P_CD_READY)
                        ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
                ipc_imem->phase = IPC_P_CD_READY;
                break;

        default:
                /* unknown exec stage:
                 * assume that link is down and send info to listeners
                 */
                ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
                break;
        }

        return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
                               struct ipc_pipe *pipe)
{
        union ipc_msg_prep_args prep_args = {
                .pipe_open.pipe = pipe,
        };

        if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
                                  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
                pipe->is_open = true;

        return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
                                     void *msg, size_t size)
{
        struct ipc_pipe *dl_pipe = msg;
        bool processed = false;
        int i;

        for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
                processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

        /* Trigger the doorbell irq to inform CP that new downlink buffers are
         * available.
         */
        if (processed)
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

        return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
        struct iosm_imem *ipc_imem =
                container_of(hr_timer, struct iosm_imem, tdupdate_timer);

        ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
                                 NULL, 0, false);
        return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
        enum ipc_mem_exec_stage exec_stage =
                ipc_imem_get_exec_stage_buffered(ipc_imem);
        /* While deinit is in progress (IPC_P_OFF_REQ), keep the internal
         * precalculated phase; otherwise map the CP execution stage.
         */
        return ipc_imem->phase == IPC_P_OFF_REQ ?
                       ipc_imem->phase :
                       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
        switch (phase) {
        case IPC_P_RUN:
                return "A-RUN";

        case IPC_P_OFF:
                return "A-OFF";

        case IPC_P_ROM:
                return "A-ROM";

        case IPC_P_PSI:
                return "A-PSI";

        case IPC_P_EBL:
                return "A-EBL";

        case IPC_P_CRASH:
                return "A-CRASH";

        case IPC_P_CD_READY:
                return "A-CD_READY";

        case IPC_P_OFF_REQ:
                return "A-OFF_REQ";

        default:
                return "A-???";
        }
}

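/* Send the pipe close message to CP and release the pipe resources. */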
void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
        union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

        pipe->is_open = false;
        ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
                              &prep_args);

        ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

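/* Close both pipes of the channel, if the modem is running, and free it. */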
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
        struct ipc_mem_channel *channel;

        if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
                dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
                return;
        }

        channel = &ipc_imem->channels[channel_id];

        if (channel->state == IMEM_CHANNEL_FREE) {
                dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
                        channel_id, channel->state);
                return;
        }

        /* Free only the channel id in the CP power off mode. */
        if (channel->state == IMEM_CHANNEL_RESERVED)
                /* Release only the channel id. */
                goto channel_free;

        if (ipc_imem->phase == IPC_P_RUN) {
                ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
                ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
        }

        ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
        ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
        ipc_imem_channel_free(channel);
}

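/* Open both pipes of a channel and allocate its downlink buffers; returns
 * the active channel on success, otherwise NULL.
 */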
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
                                              int channel_id, u32 db_id)
{
        struct ipc_mem_channel *channel;

        if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
                dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
                return NULL;
        }

        channel = &ipc_imem->channels[channel_id];

        channel->state = IMEM_CHANNEL_ACTIVE;

        if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
                goto ul_pipe_err;

        if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
                goto dl_pipe_err;

        /* Allocate the downlink buffers in tasklet context. */
        if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
                                     &channel->dl_pipe, 0, false)) {
                dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
                goto task_failed;
        }

        /* Active channel. */
        return channel;
task_failed:
        ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
        ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
        ipc_imem_channel_free(channel);
        return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
        ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
        ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
        enum ipc_mem_exec_stage stage;

        if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
                stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
                ipc_imem_phase_update_check(ipc_imem, stage);
        }
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
        /* Reset dynamic channel elements. */
        channel->state = IMEM_CHANNEL_FREE;
}

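/* Reserve the channel matching the given index and type; returns the
 * position in the channel array or a negative error code.
 */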
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
                           enum ipc_ctype ctype)
{
        struct ipc_mem_channel *channel;
        int i;

        /* Find channel of given type/index */
        for (i = 0; i < ipc_imem->nr_of_channels; i++) {
                channel = &ipc_imem->channels[i];
                if (channel->ctype == ctype && channel->index == index)
                        break;
        }

        if (i >= ipc_imem->nr_of_channels) {
                dev_dbg(ipc_imem->dev,
                        "no channel definition for index=%d ctype=%d", index,
                        ctype);
                return -ECHRNG;
        }

        if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
                dev_dbg(ipc_imem->dev, "channel is in use");
                return -EBUSY;
        }

        if (channel->ctype == IPC_CTYPE_WWAN &&
            index == IPC_MEM_MUX_IP_CH_IF_ID)
                channel->if_id = index;

        channel->channel_id = index;
        channel->state = IMEM_CHANNEL_RESERVED;

        return i;
}

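/* Register a new channel for the given configuration and initialize its
 * uplink accumulator list and completion.
 */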
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
                           struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
        struct ipc_mem_channel *channel;

        if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
            chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
                dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
                        chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
                return;
        }

        if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
                dev_err(ipc_imem->dev, "too many channels");
                return;
        }

        channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
        channel->channel_id = ipc_imem->nr_of_channels;
        channel->ctype = ctype;
        channel->index = chnl_cfg.id;
        channel->net_err_count = 0;
        channel->state = IMEM_CHANNEL_FREE;
        ipc_imem->nr_of_channels++;

        ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
                                IRQ_MOD_OFF);

        skb_queue_head_init(&channel->ul_list);

        init_completion(&channel->ul_sem);
}

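/* Apply the pipe configuration to a channel that is free or reserved. */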
1120
ipc_imem_channel_update(struct iosm_imem * ipc_imem,int id,struct ipc_chnl_cfg chnl_cfg,u32 irq_moderation)1121 void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
1122 struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
1123 {
1124 struct ipc_mem_channel *channel;
1125
1126 if (id < 0 || id >= ipc_imem->nr_of_channels) {
1127 dev_err(ipc_imem->dev, "invalid channel id %d", id);
1128 return;
1129 }
1130
1131 channel = &ipc_imem->channels[id];
1132
1133 if (channel->state != IMEM_CHANNEL_FREE &&
1134 channel->state != IMEM_CHANNEL_RESERVED) {
1135 dev_err(ipc_imem->dev, "invalid channel state %d",
1136 channel->state);
1137 return;
1138 }
1139
1140 channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
1141 channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
1142 channel->ul_pipe.is_open = false;
1143 channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
1144 channel->ul_pipe.channel = channel;
1145 channel->ul_pipe.dir = IPC_MEM_DIR_UL;
1146 channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1147 channel->ul_pipe.irq_moderation = irq_moderation;
1148 channel->ul_pipe.buf_size = 0;
1149
1150 channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
1151 channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
1152 channel->dl_pipe.is_open = false;
1153 channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
1154 channel->dl_pipe.channel = channel;
1155 channel->dl_pipe.dir = IPC_MEM_DIR_DL;
1156 channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
1157 channel->dl_pipe.irq_moderation = irq_moderation;
1158 channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
1159 }
1160
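/* Clean up the pipes of all channels and mark the channels free. */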
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
        int i;

        for (i = 0; i < ipc_imem->nr_of_channels; i++) {
                struct ipc_mem_channel *channel;

                channel = &ipc_imem->channels[i];

                ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
                ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

                ipc_imem_channel_free(channel);
        }
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
        struct sk_buff *skb;

        /* Force pipe to closed state also when not explicitly closed through
         * ipc_imem_pipe_close()
         */
        pipe->is_open = false;

        /* Empty the uplink skb accumulator. */
        while ((skb = skb_dequeue(&pipe->channel->ul_list)))
                ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

        ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
        int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
        enum ipc_mem_device_ipc_state ipc_state;

        /* When the PCIe link is up, set IPC_UNINIT on the modem; when the
         * PCIe link is down, this step is skipped.
         */
        if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
                /* set modem to UNINIT
                 * (in case we want to reload the AP driver without resetting
                 * the modem)
                 */
                ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
                                  IPC_MEM_DEVICE_IPC_UNINIT);
                ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

                /* Wait for a maximum of 30ms to allow the Modem to
                 * uninitialize the protocol.
                 */
                while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
                       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
                       (timeout > 0)) {
                        usleep_range(1000, 1250);
                        timeout--;
                        ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
                }
        }
}

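/* Tear down the shared memory layer: stop all timers and the state worker,
 * uninit the modem IPC state and release channels, protocol and task queue.
 */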
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
        ipc_imem->phase = IPC_P_OFF_REQ;

        /* forward MDM_NOT_READY to listeners */
        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

        hrtimer_cancel(&ipc_imem->td_alloc_timer);
        hrtimer_cancel(&ipc_imem->tdupdate_timer);
        hrtimer_cancel(&ipc_imem->fast_update_timer);
        hrtimer_cancel(&ipc_imem->startup_timer);

        /* cancel the workqueue */
        cancel_work_sync(&ipc_imem->run_state_worker);

        if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
                ipc_mux_deinit(ipc_imem->mux);
                ipc_debugfs_deinit(ipc_imem);
                ipc_wwan_deinit(ipc_imem->wwan);
                ipc_port_deinit(ipc_imem->ipc_port);
        }

        if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);

        ipc_imem_device_ipc_uninit(ipc_imem);
        ipc_imem_channel_reset(ipc_imem);

        ipc_protocol_deinit(ipc_imem->ipc_protocol);
        ipc_task_deinit(ipc_imem->ipc_task);

        kfree(ipc_imem->ipc_task);
        kfree(ipc_imem->mmio);

        ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
        enum ipc_phase phase;

        /* Initialize the semaphore for the blocking read UL/DL transfer. */
        init_completion(&ipc_imem->ul_pend_sem);

        init_completion(&ipc_imem->dl_pend_sem);

        /* clear internal flags */
        ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
        ipc_imem->enter_runtime = 0;

        phase = ipc_imem_phase_update(ipc_imem);

        /* Either CP shall be in the power off or power on phase. */
        switch (phase) {
        case IPC_P_ROM:
                ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
                /* poll execution stage (for delayed start, e.g. NAND) */
                if (!hrtimer_active(&ipc_imem->startup_timer))
                        hrtimer_start(&ipc_imem->startup_timer,
                                      ipc_imem->hrtimer_period,
                                      HRTIMER_MODE_REL);
                return 0;

        case IPC_P_PSI:
        case IPC_P_EBL:
        case IPC_P_RUN:
                /* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
                ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

                /* Verify the expected initial state. */
                if (ipc_imem->ipc_requested_state ==
                    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
                        ipc_imem_ipc_init_check(ipc_imem);

                        return 0;
                }
                dev_err(ipc_imem->dev,
                        "ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
                        ipc_mmio_get_ipc_state(ipc_imem->mmio));
                break;
        case IPC_P_CRASH:
        case IPC_P_CD_READY:
                dev_dbg(ipc_imem->dev,
                        "Modem is in phase %d, reset Modem to collect CD",
                        phase);
                return 0;
        default:
                dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
                break;
        }

        complete(&ipc_imem->dl_pend_sem);
        complete(&ipc_imem->ul_pend_sem);
        ipc_imem->phase = IPC_P_OFF;
        return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
                                void __iomem *mmio, struct device *dev)
{
        struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
        enum ipc_mem_exec_stage stage;

        if (!ipc_imem)
                return NULL;

        /* Save the device address. */
        ipc_imem->pcie = pcie;
        ipc_imem->dev = dev;

        ipc_imem->pci_device_id = device_id;

        ipc_imem->cp_version = 0;
        ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

        /* Reset the max number of configured channels */
        ipc_imem->nr_of_channels = 0;

        /* allocate IPC MMIO */
        ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
        if (!ipc_imem->mmio) {
                dev_err(ipc_imem->dev, "failed to initialize mmio region");
                goto mmio_init_fail;
        }

        ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
                                     GFP_KERNEL);

        /* Create tasklet for event handling */
        if (!ipc_imem->ipc_task)
                goto ipc_task_fail;

        if (ipc_task_init(ipc_imem->ipc_task))
                goto ipc_task_init_fail;

        ipc_imem->ipc_task->dev = ipc_imem->dev;

        INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

        ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

        if (!ipc_imem->ipc_protocol)
                goto protocol_init_fail;

        /* The phase is set to power off. */
        ipc_imem->phase = IPC_P_OFF;

        hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

        hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

        hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

        hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

        hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

        if (ipc_imem_config(ipc_imem)) {
                dev_err(ipc_imem->dev, "failed to initialize the imem");
                goto imem_config_fail;
        }

        stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
        if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
                /* Alloc and Register devlink */
                ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
                if (!ipc_imem->ipc_devlink) {
                        dev_err(ipc_imem->dev, "Devlink register failed");
                        goto imem_config_fail;
                }

                if (ipc_flash_link_establish(ipc_imem))
                        goto devlink_channel_fail;

                set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
        }
        return ipc_imem;
devlink_channel_fail:
        ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
        hrtimer_cancel(&ipc_imem->td_alloc_timer);
        hrtimer_cancel(&ipc_imem->fast_update_timer);
        hrtimer_cancel(&ipc_imem->tdupdate_timer);
        hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
        cancel_work_sync(&ipc_imem->run_state_worker);
        ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
        kfree(ipc_imem->ipc_task);
ipc_task_fail:
        kfree(ipc_imem->mmio);
mmio_init_fail:
        kfree(ipc_imem);
        return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
        /* Debounce IPC_EV_IRQ. */
        if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
                ipc_imem->ev_irq_pending[irq] = true;
                ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
                                         NULL, 0, false);
        }
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
        ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
                                                 int arg, void *msg,
                                                 size_t msgsize)
{
        enum ipc_mem_exec_stage stage;
        struct sk_buff *skb;
        int rc = -EINVAL;
        size_t size;

        /* Test the CP execution state. */
        stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
        if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
                dev_err(ipc_imem->dev,
                        "Execution_stage: expected BOOT, received = %X", stage);
                goto trigger_chip_info_fail;
        }
        /* Allocate a new sk buf for the chip info. */
        size = ipc_imem->mmio->chip_info_size;
        if (size > IOSM_CHIP_INFO_SIZE_MAX)
                goto trigger_chip_info_fail;

        skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
        if (!skb) {
                dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
                rc = -ENOMEM;
                goto trigger_chip_info_fail;
        }
        /* Copy the chip info characters into the ipc_skb. */
        ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
        /* First change to the ROM boot phase. */
        dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
        ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
        ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
        rc = 0;
trigger_chip_info_fail:
        return rc;
}

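/* Queue the chip info trigger to run in the tasklet context. */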
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
        return ipc_task_queue_send_task(ipc_imem,
                                        ipc_imem_devlink_trigger_chip_info_cb,
                                        0, NULL, 0, true);
}