// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS	200
#define FSM_EVENT_POLL_INTERVAL_MS	20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS	10000
#define FSM_MD_EX_PASS_TIMEOUT_MS	45000
#define FSM_CMD_TIMEOUT_MS		2000

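/*
 * Poll the device stage field of T7XX_PCIE_MISC_DEV_STATUS every 100 ms,
 * for up to 20 s, until the device reports the LK (bootloader) or Linux
 * stage. Returns 0 or -ETIMEDOUT, per read_poll_timeout(); the last value
 * read is left in @status. Note that the macro expects 'md' in scope.
 */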
#define wait_for_expected_dev_stage(status)	\
	read_poll_timeout(ioread32, status,	\
			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) ||	\
			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000,	\
			  20000000, false, IREG_BASE(md->t7xx_dev) +	\
			  T7XX_PCIE_MISC_DEV_STATUS)

void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

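/*
 * Invoke the registered notifiers for a modem state change. The notifier
 * lock is dropped around each callback, so notifier_fn may sleep; the walk
 * continues from the current node once the lock is retaken.
 */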
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

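/*
 * Complete a queued command. The result is reported back through cmd->ret
 * and the issuer's completion only when the issuer asked to wait for the
 * outcome; the command is freed here in either case.
 */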
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
	}

	kfree(cmd);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

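/*
 * Wait for @event_expected to reach the head of the event queue, polling
 * every FSM_EVENT_POLL_INTERVAL_MS; @event_ignore entries at the head are
 * consumed and skipped. Callers derive @retries from a timeout divided by
 * the poll interval. Returns early if the FSM thread is told to stop.
 */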
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

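/*
 * Handle a modem exception. Only meaningful from the READY or STARTING
 * states; for an EXCEPTION_EVENT the new modem state is broadcast, PM is
 * notified, and the exception handshake runs, waiting first for
 * MD_EX_REC_OK and then for MD_EX_PASS within their respective timeouts.
 */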
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

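/*
 * Publish a host event to the device: a read-modify-write of the
 * HOST_EVENT field in the T7XX_PCIE_MISC_DEV_STATUS register.
 */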
static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
{
	u32 value;

	value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	value &= ~HOST_EVENT_MASK;
	value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
	iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
}

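/*
 * Handle events reported while the device is in the LK (bootloader) stage.
 * A port-creation event reinitializes the AP CLDMA with the dedicated-queue
 * configuration, enables the first proxy port, and switches the device mode
 * to fastboot download (post-download port) or fastboot dump (PD port).
 */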
static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
{
	struct t7xx_modem *md = ctl->md;
	struct cldma_ctrl *md_ctrl;
	enum lk_event_id lk_event;
	struct device *dev;
	struct t7xx_port *port;

	dev = &md->t7xx_dev->pdev->dev;
	lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
	switch (lk_event) {
	case LK_EVENT_NORMAL:
	case LK_EVENT_RESET:
		break;

	case LK_EVENT_CREATE_PD_PORT:
	case LK_EVENT_CREATE_POST_DL_PORT:
		md_ctrl = md->md_ctrl[CLDMA_ID_AP];
		t7xx_cldma_hif_hw_init(md_ctrl);
		t7xx_cldma_stop(md_ctrl);
		t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);

		port = &ctl->md->port_prox->ports[0];
		port->port_conf->ops->enable_chl(port);

		t7xx_cldma_start(md_ctrl);

		if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
		else
			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
		break;

	default:
		dev_err(dev, "Invalid LK event %d\n", lk_event);
		break;
	}
}

static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	ctl->curr_state = FSM_STATE_STOPPED;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

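/*
 * Stop sequence: halt the MD CLDMA, signal DRM disable over MHCCIF (with a
 * FASTBOOT_DL_NOTIFY host event first when switching to fastboot), then
 * reset the device: a direct MHCCIF device-reset interrupt in the fastboot
 * case, otherwise the ACPI reset method with the interrupt as fallback.
 */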
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
	enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
	int err;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	if (mode == T7XX_FASTBOOT_SWITCHING)
		t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);

	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
	/* Wait for the DRM disable to take effect */
	msleep(FSM_DRM_DISABLE_DELAY_MS);

	if (mode == T7XX_FASTBOOT_SWITCHING) {
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	} else {
		err = t7xx_acpi_fldr_func(t7xx_dev);
		if (err)
			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_mode_update(md->t7xx_dev, T7XX_READY);
	t7xx_md_event_notify(md, FSM_READY);
}

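/*
 * Run the handshake phase: broadcast WAITING_FOR_HS1, then wait up to 60 s
 * for both the MD and AP cores to report ready (or for an exception). A
 * timeout on either core aborts via the exception path with -ETIMEDOUT.
 */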
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 (md->core_md.ready && md->core_ap.ready) ||
					 ctl->exp_flg, HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	} else if (!md->core_ap.ready) {
		dev_err(dev, "AP handshake timeout\n");
		if (md->core_ap.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

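/*
 * Start the modem. Polls the device stage, then dispatches on it: BROM
 * stages requeue FSM_CMD_START to wait for the next stage, the LK stage is
 * handed to the fastboot path, and the Linux stage unmasks the MHCCIF
 * handshake interrupts and, when the command flag is set, initializes both
 * CLDMA interfaces and runs the full handshake sequence.
 */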
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;
	u32 status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	dev = &md->t7xx_dev->pdev->dev;
	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = wait_for_expected_dev_stage(status);

	if (ret) {
		dev_err(dev, "read poll timeout %d\n", ret);
		goto finish_command;
	}

	if (status != ctl->status || cmd->flag != 0) {
		u32 stage = FIELD_GET(MISC_STAGE_MASK, status);

		switch (stage) {
		case T7XX_DEV_STAGE_INIT:
		case T7XX_DEV_STAGE_BROM_PRE:
		case T7XX_DEV_STAGE_BROM_POST:
			dev_dbg(dev, "BROM_STAGE Entered\n");
			ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
			break;

		case T7XX_DEV_STAGE_LK:
			dev_dbg(dev, "LK_STAGE Entered\n");
			t7xx_lk_stage_event_handling(ctl, status);
			break;

		case T7XX_DEV_STAGE_LINUX:
			dev_dbg(dev, "LINUX_STAGE Entered\n");
			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
					     D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
			if (cmd->flag == 0)
				break;
			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
			t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
			ret = fsm_routine_starting(ctl);
			break;

		default:
			break;
		}
		ctl->status = status;
	}

finish_command:
	fsm_finish_command(ctl, cmd, ret);
}

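/*
 * FSM worker thread: sleeps until a command is queued, dequeues one command
 * at a time, and dispatches to the matching routine. Unknown commands fail
 * with -EINVAL and flush both queues.
 */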
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

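/*
 * Queue a command for the FSM thread. With FSM_CMD_FLAG_WAIT_FOR_COMPLETION
 * set, the caller blocks for up to FSM_CMD_TIMEOUT_MS and receives the
 * routine's result; otherwise the call returns once the command is queued.
 * FSM_CMD_FLAG_IN_INTERRUPT selects GFP_ATOMIC for atomic callers.
 *
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_PRE_STOP,
 *				  FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
 *	if (ret)
 *		dev_err(dev, "stop failed: %d\n", ret);
 */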
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
	}

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		return ret;
	}

	return 0;
}

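/*
 * Queue an event for the FSM thread, copying an optional payload inline
 * after the event header (hence struct_size()). Allocation is GFP_ATOMIC
 * when called from interrupt context.
 */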
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(struct_size(event, data, length),
			in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

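/*
 * Translate a modem interrupt into an FSM command from atomic context: a
 * port-enumeration interrupt queues FSM_CMD_START; a CCIF exception sets
 * exp_flg, wakes the handshake waiter, and queues FSM_CMD_EXCEPTION.
 */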
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
	ctl->status = T7XX_DEV_STAGE_INIT;
}

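/*
 * Allocate and initialize the FSM control block (devres-managed) and start
 * the "t7xx_fsm" worker thread. t7xx_fsm_uninit() stops the thread and
 * flushes any commands and events still queued.
 */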
int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}
