// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define T7XX_PCI_IREG_BASE		0
#define T7XX_PCI_EREG_BASE		2

#define PM_SLEEP_DIS_TIMEOUT_MS		20
#define PM_ACK_TIMEOUT_MS		1500
#define PM_AUTOSUSPEND_MS		20000
#define PM_RESOURCE_POLL_TIMEOUT_US	10000
#define PM_RESOURCE_POLL_STEP_US	100

enum t7xx_pm_state {
	MTK_PM_EXCEPTION,
	MTK_PM_INIT,		/* Device initialized, but handshake not completed */
	MTK_PM_SUSPENDED,
	MTK_PM_RESUMED,
};

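/* Toggle the PCIe MAC sleep-disable bit: clearing it lets the device enter
 * deep sleep, setting it keeps the device awake.
 */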
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
	void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
	u32 value;

	value = ioread32(ctrl_reg);

	if (enable)
		value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
	else
		value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;

	iowrite32(value, ctrl_reg);
}

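/* Poll until the device reports that all PCIe resources are available, or
 * fail with -ETIMEDOUT after PM_RESOURCE_POLL_TIMEOUT_US.
 */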
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, val;

	ret = read_poll_timeout(ioread32, val,
				(val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
				PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
				IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
	if (ret == -ETIMEDOUT)
		dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");

	return ret;
}

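/* Set up the PM bookkeeping and keep PCIe low-power states disabled until
 * the handshake with the device completes in t7xx_pci_pm_init_late().
 */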
static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;

	INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
	mutex_init(&t7xx_dev->md_pm_entity_mtx);
	spin_lock_init(&t7xx_dev->md_pm_lock);
	init_completion(&t7xx_dev->sleep_lock_acquire);
	init_completion(&t7xx_dev->pm_sr_ack);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	device_init_wakeup(&pdev->dev, true);
	dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
				DPM_FLAG_NO_DIRECT_COMPLETE);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
	/* Enable the PCIe resource lock only after MD deep sleep is done */
	t7xx_mhccif_mask_clr(t7xx_dev,
			     D2H_INT_DS_LOCK_ACK |
			     D2H_INT_SUSPEND_ACK |
			     D2H_INT_RESUME_ACK |
			     D2H_INT_SUSPEND_ACK_AP |
			     D2H_INT_RESUME_ACK_AP);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
	pm_runtime_allow(&t7xx_dev->pdev->dev);
	pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}

static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	/* The device is kept in the FSM re-init flow,
	 * so just roll the PM settings back to their init values.
	 */
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);

	pm_runtime_get_noresume(&t7xx_dev->pdev->dev);

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	return t7xx_wait_pm_config(t7xx_dev);
}

void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}

int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return -EEXIST;
		}
	}

	list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
	return 0;
}

int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
	struct md_pm_entity *entity, *tmp_entity;

	mutex_lock(&t7xx_dev->md_pm_entity_mtx);
	list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->id == pm_entity->id) {
			list_del(&pm_entity->entity);
			mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
			return 0;
		}
	}

	mutex_unlock(&t7xx_dev->md_pm_entity_mtx);

	return -ENXIO;
}

int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
					  msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
	if (!ret)
		dev_err_ratelimited(dev, "Resource wait complete timed out\n");

	return ret;
}

/**
 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * Lock the deep sleep capability. Note that, from the host's point of view,
 * the device can still enter deep sleep while it is in the D0 state.
 *
 * If the device is in deep sleep, wake it up and disable the deep sleep capability.
 */
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count++;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock_and_complete;

	if (t7xx_dev->sleep_disable_count == 1) {
		u32 status;

		reinit_completion(&t7xx_dev->sleep_lock_acquire);
		t7xx_dev_set_sleep_capability(t7xx_dev, false);

		status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
		if (status & T7XX_PCIE_RESOURCE_STS_MSK)
			goto unlock_and_complete;

		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
	}
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	return;

unlock_and_complete:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
	complete_all(&t7xx_dev->sleep_lock_acquire);
}

/**
 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 * @t7xx_dev: MTK device.
 *
 * After deep sleep is enabled, the device can enter the deep sleep state.
 */
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
	t7xx_dev->sleep_disable_count--;
	if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
		goto unlock;

	if (t7xx_dev->sleep_disable_count == 0)
		t7xx_dev_set_sleep_capability(t7xx_dev, true);

unlock:
	spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}

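/* Trigger a host-to-device PM request over MHCCIF and wait for the matching
 * ACK interrupt to complete pm_sr_ack.
 */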
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
	unsigned long wait_ret;

	reinit_completion(&t7xx_dev->pm_sr_ack);
	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
	wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
					       msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
	if (!wait_ret)
		return -ETIMEDOUT;

	return 0;
}

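/* Suspend sequence: block low-power states, suspend every registered PM
 * entity, then request suspend from the modem (MD) and the AP subsystem.
 * On failure, resume the entities already suspended and restore the
 * resumed state.
 */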
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
	enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	int ret;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
		dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
		return -EFAULT;
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	ret = t7xx_wait_pm_config(t7xx_dev);
	if (ret) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return ret;
	}

	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_dev->rgu_pci_irq_en = false;

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (!entity->suspend)
			continue;

		ret = entity->suspend(t7xx_dev, entity->entity_param);
		if (ret) {
			entity_id = entity->id;
			dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
			goto abort_suspend;
		}
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
	if (ret) {
		dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
		goto abort_suspend;
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
	if (ret) {
		t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
		dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
		goto abort_suspend;
	}

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->suspend_late)
			entity->suspend_late(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	return 0;

abort_suspend:
	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity_id == entity->id)
			break;

		if (entity->resume)
			entity->resume(t7xx_dev, entity->entity_param);
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	return ret;
}

static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);

	/* Disable the interrupts first and let the IPs enable them */
	iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);

	/* The device disables PCIe interrupts during resume;
	 * the following call re-enables them.
	 */
	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}

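/* Re-enable the device and restore the address translation and interrupt
 * configuration. After a D3/L3 exit (is_d3), also reinitialize MHCCIF and
 * roll the PM state back to its init settings.
 */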
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
	int ret;

	ret = pcim_enable_device(t7xx_dev->pdev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pcie_interrupt_reinit(t7xx_dev);

	if (is_d3) {
		t7xx_mhccif_init(t7xx_dev);
		return t7xx_pci_pm_reinit(t7xx_dev);
	}

	return 0;
}

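/* Queue a start or stop command to the modem state machine. FSM_CMD_START
 * also re-arms the RGU reset interrupt first.
 */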
static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
	struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret = -EINVAL;

	switch (event) {
	case FSM_CMD_STOP:
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
		break;

	case FSM_CMD_START:
		t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
		t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
		t7xx_dev->rgu_pci_irq_en = true;
		t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
		ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
		break;

	default:
		break;
	}

	if (ret)
		dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);

	return ret;
}

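/* The resume path depends on the state the device comes back from, as
 * reported in the T7XX_PCIE_PM_RESUME_STATE register: L3 or a fresh boot
 * needs a full FSM stop/start, L2 needs a PCIe reinit, exception states
 * re-arm the MHCCIF exception interrupts, and L1/init fall through to the
 * plain suspend/resume handshake below.
 */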
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct md_pm_entity *entity;
	u32 prev_state;
	int ret = 0;

	t7xx_dev = pci_get_drvdata(pdev);
	if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
		iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
		return 0;
	}

	t7xx_pcie_mac_interrupts_en(t7xx_dev);
	prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);

	if (state_check) {
		/* For D3/L3 resume, the device could boot so quickly that the
		 * initial value of the dummy register might be overwritten.
		 * A new boot is identified by an uninitialized ATR source
		 * address register.
		 */
		u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
					   ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
		if (prev_state == PM_RESUME_REG_STATE_L3 ||
		    (prev_state == PM_RESUME_REG_STATE_INIT &&
		     atr_reg_val == ATR_SRC_ADDR_INVALID)) {
			ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
			if (ret)
				return ret;

			ret = t7xx_pcie_reinit(t7xx_dev, true);
			if (ret)
				return ret;

			t7xx_clear_rgu_irq(t7xx_dev);
			return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
		}

		if (prev_state == PM_RESUME_REG_STATE_EXP ||
		    prev_state == PM_RESUME_REG_STATE_L2_EXP) {
			if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
				ret = t7xx_pcie_reinit(t7xx_dev, false);
				if (ret)
					return ret;
			}

			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			t7xx_dev->rgu_pci_irq_en = true;
			t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);

			t7xx_mhccif_mask_clr(t7xx_dev,
					     D2H_INT_EXCEPTION_INIT |
					     D2H_INT_EXCEPTION_INIT_DONE |
					     D2H_INT_EXCEPTION_CLEARQ_DONE |
					     D2H_INT_EXCEPTION_ALLQ_RESET |
					     D2H_INT_PORT_ENUM);

			return ret;
		}

		if (prev_state == PM_RESUME_REG_STATE_L2) {
			ret = t7xx_pcie_reinit(t7xx_dev, false);
			if (ret)
				return ret;

		} else if (prev_state != PM_RESUME_REG_STATE_L1 &&
			   prev_state != PM_RESUME_REG_STATE_INIT) {
			ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
			if (ret)
				return ret;

			t7xx_clear_rgu_irq(t7xx_dev);
			atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
			return 0;
		}
	}

	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
	t7xx_wait_pm_config(t7xx_dev);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume_early)
			entity->resume_early(t7xx_dev, entity->entity_param);
	}

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
	if (ret)
		dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);

	ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
	if (ret)
		dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);

	list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
		if (entity->resume) {
			ret = entity->resume(t7xx_dev, entity->entity_param);
			if (ret)
				dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
					entity->id, ret);
		}
	}

	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
	iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
	pm_runtime_mark_last_busy(&pdev->dev);
	atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);

	return ret;
}

static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct t7xx_pci_dev *t7xx_dev;

	t7xx_dev = pci_get_drvdata(pdev);
	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	return 0;
}

static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
	__t7xx_pci_pm_suspend(pdev);
}

static int t7xx_pci_pm_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static int t7xx_pci_pm_thaw(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}

static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
	return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}

static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
	return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}

static const struct dev_pm_ops t7xx_pci_pm_ops = {
	.suspend = t7xx_pci_pm_suspend,
	.resume = t7xx_pci_pm_resume,
	.resume_noirq = t7xx_pci_pm_resume_noirq,
	.freeze = t7xx_pci_pm_suspend,
	.thaw = t7xx_pci_pm_thaw,
	.poweroff = t7xx_pci_pm_suspend,
	.restore = t7xx_pci_pm_resume,
	.restore_noirq = t7xx_pci_pm_resume_noirq,
	.runtime_suspend = t7xx_pci_pm_runtime_suspend,
	.runtime_resume = t7xx_pci_pm_runtime_resume
};

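/* Request one threaded IRQ per configured MSI-X vector. On failure, free
 * the IRQs already requested before returning.
 */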
static int t7xx_request_irq(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret = 0, i;

	t7xx_dev = pci_get_drvdata(pdev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		const char *irq_descr;
		int irq_vec;

		if (!t7xx_dev->intr_handler[i])
			continue;

		irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
					   dev_driver_string(&pdev->dev), i);
		if (!irq_descr) {
			ret = -ENOMEM;
			break;
		}

		irq_vec = pci_irq_vector(pdev, i);
		ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
					   t7xx_dev->intr_thread[i], 0, irq_descr,
					   t7xx_dev->callback_param[i]);
		if (ret) {
			dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			if (!t7xx_dev->intr_handler[i])
				continue;

			free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
		}
	}

	return ret;
}

static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
	struct pci_dev *pdev = t7xx_dev->pdev;
	int ret;

	/* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */
	ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
		return ret;
	}

	ret = t7xx_request_irq(pdev);
	if (ret) {
		pci_free_irq_vectors(pdev);
		return ret;
	}

	t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
	return 0;
}

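/* MSI-X is mandatory for this device: set up the vectors and unmask every
 * external interrupt source.
 */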
static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
	int ret, i;

	if (!t7xx_dev->pdev->msix_cap)
		return -EINVAL;

	ret = t7xx_setup_msix(t7xx_dev);
	if (ret)
		return ret;

	/* IPs enable interrupts when ready */
	for (i = 0; i < EXT_INT_NUM; i++)
		t7xx_pcie_mac_set_int(t7xx_dev, i);

	return 0;
}

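/* Derive the host virtual address of the always-on infracfg block from the
 * mapped external register BAR and the PCIe address translation offset.
 */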
static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
					       INFRACFG_AO_DEV_CHIP -
					       t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}

static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct t7xx_pci_dev *t7xx_dev;
	int ret;

	t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
	if (!t7xx_dev)
		return -ENOMEM;

	pci_set_drvdata(pdev, t7xx_dev);
	t7xx_dev->pdev = pdev;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
				 pci_name(pdev));
	if (ret) {
		dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
		return -ENOMEM;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
		return ret;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
		return ret;
	}

	IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
	t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];

	ret = t7xx_pci_pm_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_atr_init(t7xx_dev);
	t7xx_pci_infracfg_ao_calc(t7xx_dev);
	t7xx_mhccif_init(t7xx_dev);

	ret = t7xx_md_init(t7xx_dev);
	if (ret)
		return ret;

	t7xx_pcie_mac_interrupts_dis(t7xx_dev);

	ret = t7xx_interrupt_init(t7xx_dev);
	if (ret) {
		t7xx_md_exit(t7xx_dev);
		return ret;
	}

	t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
	t7xx_pcie_mac_interrupts_en(t7xx_dev);

	return 0;
}

static void t7xx_pci_remove(struct pci_dev *pdev)
{
	struct t7xx_pci_dev *t7xx_dev;
	int i;

	t7xx_dev = pci_get_drvdata(pdev);
	t7xx_md_exit(t7xx_dev);

	for (i = 0; i < EXT_INT_NUM; i++) {
		if (!t7xx_dev->intr_handler[i])
			continue;

		free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
	}

	pci_free_irq_vectors(t7xx_dev->pdev);
}

static const struct pci_device_id t7xx_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
	{ }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);

static struct pci_driver t7xx_pci_driver = {
	.name = "mtk_t7xx",
	.id_table = t7xx_pci_table,
	.probe = t7xx_pci_probe,
	.remove = t7xx_pci_remove,
	.driver.pm = &t7xx_pci_pm_ops,
	.shutdown = t7xx_pci_shutdown,
};

module_pci_driver(t7xx_pci_driver);

MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");