/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-07     GuEe-GUI     first version
 */

#include <drivers/pci_msi.h>
#include <drivers/core/numa.h>

#define DBG_TAG "pci.msi"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

/* MSI-X supports at most 2048 vectors per function */
static RT_IRQ_AFFINITY_DECLARE(msi_affinity_default[2048]) rt_section(".bss.noclean.pci.msi");

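/*
 * Raw spinlock helpers: these take the low-level hardware lock directly and
 * do not save or restore the interrupt state; paths that need IRQ protection
 * use rt_spin_lock_irqsave() instead (see msi_write_mask()).
 */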
rt_inline void spin_lock(struct rt_spinlock *lock)
{
    rt_hw_spin_lock(&lock->lock);
}

rt_inline void spin_unlock(struct rt_spinlock *lock)
{
    rt_hw_spin_unlock(&lock->lock);
}

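/*
 * Each MSI-X table entry is PCIM_MSIX_ENTRY_SIZE bytes and holds the message
 * address (low/high), message data and vector control words, so the entry for
 * a given vector sits at table_base + index * PCIM_MSIX_ENTRY_SIZE.
 */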
rt_inline void *msix_table_base(struct rt_pci_msix_conf *msix)
{
    return msix->table_base + msix->index * PCIM_MSIX_ENTRY_SIZE;
}

rt_inline void *msix_vector_ctrl_base(struct rt_pci_msix_conf *msix)
{
    return msix_table_base(msix) + PCIM_MSIX_ENTRY_VECTOR_CTRL;
}

rt_inline void msix_write_vector_ctrl(struct rt_pci_msix_conf *msix,
        rt_uint32_t ctrl)
{
    void *vc_addr = msix_vector_ctrl_base(msix);

    HWREG32(vc_addr) = ctrl;
}

rt_inline void msix_mask(struct rt_pci_msix_conf *msix)
{
    msix->msg_ctrl |= PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
    msix_write_vector_ctrl(msix, msix->msg_ctrl);

    /* Flush write to device */
    HWREG32(msix->table_base);
}

static void msix_update_ctrl(struct rt_pci_device *pdev,
        rt_uint16_t clear, rt_uint16_t set)
{
    rt_uint16_t msgctl;

    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
    msgctl &= ~clear;
    msgctl |= set;
    rt_pci_write_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, msgctl);
}

rt_inline void msix_unmask(struct rt_pci_msix_conf *msix)
{
    msix->msg_ctrl &= ~PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
    msix_write_vector_ctrl(msix, msix->msg_ctrl);
}

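/*
 * cap.multi_msg_max is the log2 of the number of MSI vectors the device
 * advertises, so for example multi_msg_max = 3 means 8 vectors and a
 * per-vector mask of (1 << 8) - 1 = 0xff; a value of 5 or more means the
 * full 32 vectors, which needs the whole 32-bit mask.
 */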
rt_inline rt_uint32_t msi_multi_mask(struct rt_pci_msi_conf *msi)
{
    if (msi->cap.multi_msg_max >= 5)
    {
        return 0xffffffff;
    }

    return (1 << (1 << msi->cap.multi_msg_max)) - 1;
}

static void msi_write_mask(struct rt_pci_msi_conf *msi,
        rt_uint32_t clear, rt_uint32_t set, struct rt_pci_device *pdev)
{
    if (msi->cap.is_masking)
    {
        rt_ubase_t level = rt_spin_lock_irqsave(&pdev->msi_lock);

        msi->mask &= ~clear;
        msi->mask |= set;
        rt_pci_write_config_u32(pdev, msi->mask_pos, msi->mask);

        rt_spin_unlock_irqrestore(&pdev->msi_lock, level);
    }
}

rt_inline void msi_mask(struct rt_pci_msi_conf *msi,
        rt_uint32_t mask, struct rt_pci_device *pdev)
{
    msi_write_mask(msi, 0, mask, pdev);
}

rt_inline void msi_unmask(struct rt_pci_msi_conf *msi,
        rt_uint32_t mask, struct rt_pci_device *pdev)
{
    msi_write_mask(msi, mask, 0, pdev);
}

static void msi_write_enable(struct rt_pci_device *pdev, rt_bool_t enable)
{
    rt_uint16_t msgctl;

    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);

    msgctl &= ~PCIM_MSICTRL_MSI_ENABLE;

    if (enable)
    {
        msgctl |= PCIM_MSICTRL_MSI_ENABLE;
    }

    rt_pci_write_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, msgctl);
}

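/*
 * Bind one MSI/MSI-X vector to the requested CPUs. When the caller passed no
 * explicit mask (cpumasks points inside msi_affinity_default), the affinity
 * is derived from the NUMA affinity of the MSI message address; otherwise the
 * caller's mask is applied as-is.
 */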
static void msi_affinity_init(struct rt_pci_msi_desc *desc, int msi_index,
        rt_bitmap_t *cpumasks)
{
    int irq;
    struct rt_pic_irq *pirq;
    struct rt_pci_device *pdev = desc->pdev;
    struct rt_pic *msi_pic = pdev->msi_pic;

    irq = desc->irq + (desc->is_msix ? 0 : msi_index);
    pirq = rt_pic_find_pirq(msi_pic, irq);

    /* Save affinity */
    if (desc->is_msix)
    {
        desc->affinity = pirq->affinity;
    }
    else
    {
        desc->affinities[msi_index] = pirq->affinity;
    }

    if ((void *)cpumasks >= (void *)msi_affinity_default &&
        (void *)cpumasks < (void *)msi_affinity_default + sizeof(msi_affinity_default))
    {
        rt_uint64_t data_address;

        /* Get MSI/MSI-X write data address */
        data_address = desc->msg.address_hi;
        data_address <<= 32;
        data_address |= desc->msg.address_lo;

        /* Prepare affinity */
        cpumasks = pirq->affinity;

        rt_numa_memory_affinity(data_address, cpumasks);
    }
    else if (rt_bitmap_next_set_bit(cpumasks, 0, RT_CPUS_NR) >= RT_CPUS_NR)
    {
        /* No affinity info found, give up */
        return;
    }

    if (!rt_pic_irq_set_affinity(irq, cpumasks))
    {
        if (msi_pic->ops->irq_write_msi_msg)
        {
            msi_pic->ops->irq_write_msi_msg(pirq, &desc->msg);
        }
    }
}

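/*
 * Disable MSI on the device: clear the MSI enable bit, re-enable the INTx
 * pin, unmask all vectors and restore the wired IRQ number that was saved
 * when MSI was set up.
 */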
void rt_pci_msi_shutdown(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc;

    if (!pdev)
    {
        return;
    }

    msi_write_enable(pdev, RT_FALSE);
    rt_pci_intx(pdev, RT_TRUE);

    if ((desc = rt_pci_msi_first_desc(pdev)))
    {
        msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);

        /* Restore pdev->irq to its default pin-assertion IRQ */
        pdev->irq = desc->msi.default_irq;
    }

    pdev->msi_enabled = RT_FALSE;
}

void rt_pci_msix_shutdown(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc;

    if (!pdev)
    {
        return;
    }

    rt_pci_msi_for_each_desc(pdev, desc)
    {
        msix_mask(&desc->msix);
    }

    msix_update_ctrl(pdev, PCIM_MSIXCTRL_MSIX_ENABLE, 0);

    rt_pci_intx(pdev, RT_TRUE);
    pdev->msix_enabled = RT_FALSE;
}

void rt_pci_msi_free_irqs(struct rt_pci_device *pdev)
{
    struct rt_pci_msi_desc *desc, *last_desc = RT_NULL;

    if (!pdev)
    {
        return;
    }

    if (pdev->msix_base)
    {
        rt_iounmap(pdev->msix_base);
        pdev->msix_base = RT_NULL;
    }

    rt_pci_msi_for_each_desc(pdev, desc)
    {
        /* Free the previous node so the one being iterated is never touched */
        if (last_desc)
        {
            rt_list_remove(&last_desc->list);
            rt_free(last_desc);
        }
        last_desc = desc;
    }

    /* The last one */
    if (last_desc)
    {
        rt_list_remove(&last_desc->list);
        rt_free(last_desc);
    }
}

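/*
 * Program a composed MSI message into the hardware: for MSI-X the
 * address/data words go into the vector's table entry (masked around the
 * update), for plain MSI they go into the capability registers in config
 * space. The message is cached in the descriptor and any registered
 * write_msi_msg callback is invoked afterwards.
 */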
void rt_pci_msi_write_msg(struct rt_pci_msi_desc *desc, struct rt_pci_msi_msg *msg)
{
    struct rt_pci_device *pdev = desc->pdev;

    if (desc->is_msix)
    {
        void *msix_entry;
        rt_bool_t unmasked;
        rt_uint32_t msgctl;
        struct rt_pci_msix_conf *msix = &desc->msix;

        msgctl = msix->msg_ctrl;
        unmasked = !(msgctl & PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
        msix_entry = msix_table_base(msix);

        if (unmasked)
        {
            msix_write_vector_ctrl(msix, msgctl | PCIM_MSIX_ENTRYVECTOR_CTRL_MASK);
        }

        HWREG32(msix_entry + PCIM_MSIX_ENTRY_LOWER_ADDR) = msg->address_lo;
        HWREG32(msix_entry + PCIM_MSIX_ENTRY_UPPER_ADDR) = msg->address_hi;
        HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA) = msg->data;

        if (unmasked)
        {
            msix_write_vector_ctrl(msix, msgctl);
        }

        /* Ensure that the writes are visible in the device */
        HWREG32(msix_entry + PCIM_MSIX_ENTRY_DATA);
    }
    else
    {
        rt_uint16_t msgctl;
        int pos = pdev->msi_cap;
        struct rt_pci_msi_conf *msi = &desc->msi;

        rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
        msgctl &= ~PCIM_MSICTRL_MME_MASK;
        msgctl |= msi->cap.multi_msg_use << PCIM_MSICTRL_MME_SHIFT;
        rt_pci_write_config_u16(pdev, pos + PCIR_MSI_CTRL, msgctl);

        rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR, msg->address_lo);

        /*
         * The Message Address/Data values are specific to the processor and
         * interrupt controller: system software fills them in when the PCIe
         * device's MSI is initialized, and the encoding rules differ between
         * processors. If the Multiple Message Enable field is not 0b000
         * (more than one vector), the device signals different vectors by
         * varying the low-order bits of the Message Data field.
         */
        if (msi->cap.is_64bit)
        {
            rt_pci_write_config_u32(pdev, pos + PCIR_MSI_ADDR_HIGH, msg->address_hi);
            rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA_64BIT, msg->data);
        }
        else
        {
            rt_pci_write_config_u16(pdev, pos + PCIR_MSI_DATA, msg->data);
        }

        /* Ensure that the writes are visible in the device */
        rt_pci_read_config_u16(pdev, pos + PCIR_MSI_CTRL, &msgctl);
    }

    desc->msg = *msg;

    if (desc->write_msi_msg)
    {
        desc->write_msi_msg(desc, desc->write_msi_msg_data);
    }
}

void rt_pci_msi_mask_irq(struct rt_pic_irq *pirq)
{
    struct rt_pci_msi_desc *desc;

    if (pirq && (desc = pirq->msi_desc))
    {
        if (desc->is_msix)
        {
            msix_mask(&desc->msix);
        }
        else
        {
            msi_mask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
        }
    }
}

void rt_pci_msi_unmask_irq(struct rt_pic_irq *pirq)
{
    struct rt_pci_msi_desc *desc;

    if (pirq && (desc = pirq->msi_desc))
    {
        if (desc->is_msix)
        {
            msix_unmask(&desc->msix);
        }
        else
        {
            msi_unmask(&desc->msi, RT_BIT(pirq->irq - desc->irq), desc->pdev);
        }
    }
}

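/*
 * Allocate between "min" and "max" interrupt vectors for a device, trying
 * MSI-X, then MSI, then legacy INTx, in the order permitted by "flags".
 * Returns the number of vectors allocated or a negative error code.
 *
 * A minimal usage sketch (illustrative only, no particular device assumed):
 *
 *     rt_ssize_t nvec = rt_pci_alloc_vector(pdev, 1, 8,
 *             RT_PCI_IRQ_F_MSI | RT_PCI_IRQ_F_LEGACY, RT_NULL);
 *
 *     if (nvec < 0)
 *     {
 *         return nvec;    Neither MSI nor INTx could be set up
 *     }
 */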
rt_ssize_t rt_pci_alloc_vector(struct rt_pci_device *pdev, int min, int max,
        rt_uint32_t flags, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    rt_ssize_t res = -RT_ENOSYS;

    if (!pdev || min > max)
    {
        return -RT_EINVAL;
    }

    if (flags & RT_PCI_IRQ_F_AFFINITY)
    {
        if (!affinities)
        {
            affinities = msi_affinity_default;
        }
    }
    else
    {
        affinities = RT_NULL;
    }

    if (flags & RT_PCI_IRQ_F_MSIX)
    {
        res = rt_pci_msix_enable_range_affinity(pdev, RT_NULL, min, max, affinities);

        if (res > 0)
        {
            return res;
        }
    }

    if (flags & RT_PCI_IRQ_F_MSI)
    {
        res = rt_pci_msi_enable_range_affinity(pdev, min, max, affinities);

        if (res > 0)
        {
            return res;
        }
    }

    if (flags & RT_PCI_IRQ_F_LEGACY)
    {
        if (min == 1 && pdev->irq >= 0)
        {
            if (affinities)
            {
                int cpuid;
                RT_IRQ_AFFINITY_DECLARE(old_affinity);

                /* INTx is shared, we should update it */
                rt_pic_irq_get_affinity(pdev->irq, old_affinity);

                rt_bitmap_for_each_set_bit(affinities[0], cpuid, RT_CPUS_NR)
                {
                    RT_IRQ_AFFINITY_SET(old_affinity, cpuid);
                }

                rt_pic_irq_set_affinity(pdev->irq, old_affinity);
            }

            rt_pci_intx(pdev, RT_TRUE);

            return min;
        }
    }

    return res;
}

void rt_pci_free_vector(struct rt_pci_device *pdev)
{
    if (!pdev)
    {
        return;
    }

    rt_pci_msi_disable(pdev);
    rt_pci_msix_disable(pdev);
    rt_pci_irq_mask(pdev);
}

static rt_err_t msi_verify_entries(struct rt_pci_device *pdev)
{
    if (pdev->no_64bit_msi)
    {
        struct rt_pci_msi_desc *desc;

        rt_pci_msi_for_each_desc(pdev, desc)
        {
            if (desc->msg.address_hi)
            {
                LOG_D("%s: Arch assigned 64-bit MSI address %08x%08x "
                        "but device only supports 32 bits",
                        rt_dm_dev_get_name(&pdev->parent),
                        desc->msg.address_hi, desc->msg.address_lo);

                return -RT_EIO;
            }
        }
    }

    return RT_EOK;
}

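/*
 * Allocate a descriptor, copy the template into it and queue it on the
 * device's MSI descriptor list. Plain MSI descriptors get extra room for the
 * per-vector affinity pointers appended after the structure (a single MSI
 * function can carry at most 32 vectors, hence the fixed allocation).
 */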
static rt_err_t msi_insert_desc(struct rt_pci_device *pdev,
        struct rt_pci_msi_desc *init_desc)
{
    rt_size_t msi_affinity_ptr_size = 0;
    struct rt_pci_msi_desc *msi_desc;

    if (!init_desc->is_msix)
    {
        msi_affinity_ptr_size += sizeof(msi_desc->affinities[0]) * 32;
    }

    msi_desc = rt_calloc(1, sizeof(*msi_desc) + msi_affinity_ptr_size);

    if (!msi_desc)
    {
        return -RT_ENOMEM;
    }

    rt_memcpy(msi_desc, init_desc, sizeof(*msi_desc));

    if (!init_desc->is_msix)
    {
        msi_desc->affinities = (void *)msi_desc + sizeof(*msi_desc);
    }

    msi_desc->pdev = pdev;
    rt_list_init(&msi_desc->list);
    rt_list_insert_before(&pdev->msi_desc_nodes, &msi_desc->list);

    return RT_EOK;
}

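/*
 * The Multiple Message Capable field sits in bits 3:1 of the MSI control
 * register and encodes log2 of the supported vector count, so for example an
 * MMC value of 0b011 decodes to 1 << 3 = 8 vectors.
 */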
rt_ssize_t rt_pci_msi_vector_count(struct rt_pci_device *pdev)
{
    rt_uint16_t msgctl;

    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msi_cap)
    {
        return -RT_EINVAL;
    }

    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);

    return 1 << ((msgctl & PCIM_MSICTRL_MMC_MASK) >> 1);
}

rt_err_t rt_pci_msi_disable(struct rt_pci_device *pdev)
{
    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msi_enabled)
    {
        return -RT_EINVAL;
    }

    spin_lock(&pdev->msi_lock);

    rt_pci_msi_shutdown(pdev);
    rt_pci_msi_free_irqs(pdev);

    spin_unlock(&pdev->msi_lock);

    return RT_EOK;
}

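/*
 * Build a single MSI descriptor from the MSI capability: cache the 64-bit and
 * per-vector-masking capability bits, pick the smallest power-of-two message
 * count that covers "nvec", and remember the wired INTx IRQ so it can be
 * restored on shutdown.
 */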
static rt_err_t msi_setup_msi_desc(struct rt_pci_device *pdev, int nvec)
{
    rt_uint16_t msgctl;
    struct rt_pci_msi_desc desc;

    rt_memset(&desc, 0, sizeof(desc));

    desc.vector_used = nvec;
    desc.vector_count = rt_pci_msi_vector_count(pdev);
    desc.is_msix = RT_FALSE;

    rt_pci_read_config_u16(pdev, pdev->msi_cap + PCIR_MSI_CTRL, &msgctl);

    desc.msi.cap.is_64bit = !!(msgctl & PCIM_MSICTRL_64BIT);
    desc.msi.cap.is_masking = !!(msgctl & PCIM_MSICTRL_VECTOR);
    desc.msi.cap.multi_msg_max = (msgctl & PCIM_MSICTRL_MMC_MASK) >> 1;

    for (int log2 = 0; log2 < 5; ++log2)
    {
        if (nvec <= (1 << log2))
        {
            desc.msi.cap.multi_msg_use = log2;
            break;
        }
    }

    if (desc.msi.cap.is_64bit)
    {
        desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK_64BIT;
    }
    else
    {
        desc.msi.mask_pos = pdev->msi_cap + PCIR_MSI_MASK;
    }

    /* Save pdev->irq as its default pin-assertion IRQ */
    desc.msi.default_irq = pdev->irq;

    if (desc.msi.cap.is_masking)
    {
        /* Get the old mask status */
        rt_pci_read_config_u32(pdev, desc.msi.mask_pos, &desc.msi.mask);
    }

    return msi_insert_desc(pdev, &desc);
}

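/*
 * Enable MSI on a device: create the descriptor, mask every vector, let the
 * MSI PIC allocate and program the IRQs, verify the resulting messages, apply
 * any requested affinity, switch off INTx and finally set the MSI enable bit.
 * Returns the number of vectors on success.
 */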
static rt_ssize_t msi_capability_init(struct rt_pci_device *pdev,
        int nvec, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    rt_err_t err;
    struct rt_pci_msi_desc *desc;

    msi_write_enable(pdev, RT_FALSE);

    spin_lock(&pdev->msi_lock);

    if (!(err = msi_setup_msi_desc(pdev, nvec)))
    {
        /* All MSIs are unmasked by default; mask them all */
        desc = rt_pci_msi_first_desc(pdev);
        msi_mask(&desc->msi, msi_multi_mask(&desc->msi), pdev);

        if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSI)))
        {
            err = msi_verify_entries(pdev);
        }

        if (err)
        {
            msi_unmask(&desc->msi, msi_multi_mask(&desc->msi), pdev);
        }
    }

    spin_unlock(&pdev->msi_lock);

    if (err)
    {
        rt_pci_msi_free_irqs(pdev);

        LOG_E("%s: Setup %s interrupts(%d) error = %s",
                rt_dm_dev_get_name(&pdev->parent), "MSI", nvec, rt_strerror(err));

        return err;
    }

    if (affinities)
    {
        for (int idx = 0; idx < nvec; ++idx)
        {
            msi_affinity_init(desc, idx, affinities[idx]);
        }
    }

    /* Disable INTX */
    rt_pci_intx(pdev, RT_FALSE);

    /* Set MSI enabled bits */
    msi_write_enable(pdev, RT_TRUE);

    pdev->irq = desc->irq;

    pdev->msi_enabled = RT_TRUE;

    return nvec;
}

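/*
 * Public entry for plain MSI: checks that the device supports MSI, has an MSI
 * PIC and is not already enabled, and that "max" does not exceed the
 * advertised vector count, then enables "max" vectors via
 * msi_capability_init(). "affinities", when given, supplies one CPU mask per
 * vector.
 */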
rt_ssize_t rt_pci_msi_enable_range_affinity(struct rt_pci_device *pdev,
        int min, int max, RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    int nvec = max;
    rt_ssize_t entries_nr;

    if (!pdev || min > max)
    {
        return -RT_EINVAL;
    }

    if (pdev->no_msi)
    {
        return -RT_ENOSYS;
    }

    if (!pdev->msi_pic)
    {
        return -RT_ENOSYS;
    }

    if (pdev->msi_enabled)
    {
        LOG_W("%s: MSI is already enabled", rt_dm_dev_get_name(&pdev->parent));

        return -RT_EINVAL;
    }

    entries_nr = rt_pci_msi_vector_count(pdev);

    if (entries_nr < 0)
    {
        return entries_nr;
    }

    if (nvec > entries_nr)
    {
        return -RT_EEMPTY;
    }

    return msi_capability_init(pdev, nvec, affinities);
}

rt_ssize_t rt_pci_msix_vector_count(struct rt_pci_device *pdev)
{
    rt_uint16_t msgctl;

    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msix_cap)
    {
        return -RT_EINVAL;
    }

    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);

    return rt_pci_msix_table_size(msgctl);
}

rt_err_t rt_pci_msix_disable(struct rt_pci_device *pdev)
{
    if (!pdev)
    {
        return -RT_EINVAL;
    }

    if (!pdev->msix_enabled)
    {
        return -RT_EINVAL;
    }

    spin_lock(&pdev->msi_lock);

    rt_pci_msix_shutdown(pdev);
    rt_pci_msi_free_irqs(pdev);

    spin_unlock(&pdev->msi_lock);

    return RT_EOK;
}

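/*
 * Locate the MSI-X table: the Table Offset/BIR register gives the BAR that
 * holds the table and the offset within it; map just enough of that BAR to
 * cover "entries_nr" table entries.
 */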
static void *msix_table_remap(struct rt_pci_device *pdev, rt_size_t entries_nr)
{
    rt_uint8_t bir;
    rt_uint32_t table_offset;
    rt_ubase_t table_base_phys;

    rt_pci_read_config_u32(pdev, pdev->msix_cap + PCIR_MSIX_TABLE, &table_offset);
    bir = (rt_uint8_t)(table_offset & PCIM_MSIX_BIR_MASK);

    if (pdev->resource[bir].flags & PCI_BUS_REGION_F_NONE)
    {
        LOG_E("%s: BAR[bir = %d] is invalid", rt_dm_dev_get_name(&pdev->parent), bir);

        return RT_NULL;
    }

    table_base_phys = pdev->resource[bir].base + (table_offset & ~PCIM_MSIX_BIR_MASK);

    return rt_ioremap((void *)table_base_phys, entries_nr * PCIM_MSIX_ENTRY_SIZE);
}

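/*
 * Create one descriptor per requested MSI-X vector. If the caller supplied an
 * "entries" array its indices select the table entries, otherwise the first
 * "nvec" entries are used; the current vector-control word is cached so the
 * mask state is tracked correctly.
 */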
static rt_err_t msix_setup_msi_descs(struct rt_pci_device *pdev,
        void *table_base, struct rt_pci_msix_entry *entries, int nvec)
{
    rt_err_t err = RT_EOK;
    struct rt_pci_msi_desc desc;

    rt_memset(&desc, 0, sizeof(desc));

    desc.vector_used = 1;
    desc.vector_count = rt_pci_msix_vector_count(pdev);

    desc.is_msix = RT_TRUE;
    desc.msix.table_base = table_base;

    for (int i = 0; i < nvec; ++i)
    {
        void *table_entry;
        int index = entries ? entries[i].index : i;

        desc.msix.index = index;
        table_entry = msix_table_base(&desc.msix);

        desc.msix.msg_ctrl = HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL);

        if ((err = msi_insert_desc(pdev, &desc)))
        {
            break;
        }
    }

    return err;
}

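/*
 * Enable MSI-X: mask the whole function while the table is set up, map the
 * table, create the descriptors, let the MSI PIC program and verify the
 * vectors, report the assigned IRQ numbers back through "entries", disable
 * INTx and finally drop the function mask.
 */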
static rt_ssize_t msix_capability_init(struct rt_pci_device *pdev,
        struct rt_pci_msix_entry *entries, int nvec,
        RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    rt_err_t err;
    rt_uint16_t msgctl;
    rt_size_t table_size;
    void *table_base, *table_entry;
    struct rt_pci_msi_desc *desc;
    struct rt_pci_msix_entry *entry;

    /*
     * Some devices require MSI-X to be enabled before the MSI-X
     * registers can be accessed.
     * Mask all the vectors to prevent interrupts coming in before
     * they're fully set up.
     */
    msix_update_ctrl(pdev, 0, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE);

    rt_pci_read_config_u16(pdev, pdev->msix_cap + PCIR_MSIX_CTRL, &msgctl);
    /* Request & Map MSI-X table region */
    table_size = rt_pci_msix_table_size(msgctl);
    table_base = msix_table_remap(pdev, table_size);

    if (!table_base)
    {
        LOG_E("%s: Remap MSI-X table failed", rt_dm_dev_get_name(&pdev->parent));

        err = -RT_ENOMEM;
        goto _out_disable_msix;
    }

    pdev->msix_base = table_base;

    spin_lock(&pdev->msi_lock);

    if (!(err = msix_setup_msi_descs(pdev, table_base, entries, nvec)))
    {
        if (!(err = rt_pci_msi_setup_irqs(pdev, nvec, PCIY_MSIX)))
        {
            /* Check if all MSI entries honor device restrictions */
            err = msi_verify_entries(pdev);
        }
    }

    spin_unlock(&pdev->msi_lock);

    if (err)
    {
        rt_pci_msi_free_irqs(pdev);

        LOG_E("%s: Setup %s interrupts(%d) error = %s",
                rt_dm_dev_get_name(&pdev->parent), "MSI-X", nvec, rt_strerror(err));

        goto _out_disable_msix;
    }

    entry = entries;
    rt_pci_msi_for_each_desc(pdev, desc)
    {
        if (affinities)
        {
            msi_affinity_init(desc, desc->msix.index, affinities[entry->index]);
        }

        entry->irq = desc->irq;
        ++entry;
    }

    /* Disable INTX */
    rt_pci_intx(pdev, RT_FALSE);

    /* Mask all table entries */
    table_entry = table_base;
    for (int i = 0; i < table_size; ++i, table_entry += PCIM_MSIX_ENTRY_SIZE)
    {
        HWREG32(table_entry + PCIM_MSIX_ENTRY_VECTOR_CTRL) = PCIM_MSIX_ENTRYVECTOR_CTRL_MASK;
    }
    msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK, 0);

    pdev->msix_enabled = RT_TRUE;

    return nvec;

_out_disable_msix:
    msix_update_ctrl(pdev, PCIM_MSIXCTRL_FUNCTION_MASK | PCIM_MSIXCTRL_MSIX_ENABLE, 0);

    return err;
}

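/*
 * Public entry for MSI-X: validates the request (vector count, no duplicate
 * or out-of-range entry indices) and then enables "max" vectors. Each
 * rt_pci_msix_entry names the table entry to use in "index" and receives the
 * allocated IRQ back in "irq".
 *
 * A minimal usage sketch (illustrative only, entry indices are arbitrary):
 *
 *     struct rt_pci_msix_entry entries[2] =
 *     {
 *         { .index = 0 },
 *         { .index = 1 },
 *     };
 *
 *     if (rt_pci_msix_enable_range_affinity(pdev, entries, 2, 2, RT_NULL) == 2)
 *     {
 *         entries[0].irq and entries[1].irq now hold the vectors' IRQ numbers
 *     }
 */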
rt_ssize_t rt_pci_msix_enable_range_affinity(struct rt_pci_device *pdev,
        struct rt_pci_msix_entry *entries, int min, int max,
        RT_IRQ_AFFINITY_DECLARE((*affinities)))
{
    int nvec = max;
    rt_ssize_t entries_nr;

    if (!pdev || min > max)
    {
        return -RT_EINVAL;
    }

    if (pdev->no_msi)
    {
        return -RT_ENOSYS;
    }

    if (!pdev->msi_pic)
    {
        return -RT_ENOSYS;
    }

    if (pdev->msix_enabled)
    {
        LOG_W("%s: MSI-X is already enabled", rt_dm_dev_get_name(&pdev->parent));

        return -RT_EINVAL;
    }

    entries_nr = rt_pci_msix_vector_count(pdev);

    if (entries_nr < 0)
    {
        return entries_nr;
    }

    if (nvec > entries_nr)
    {
        return -RT_EEMPTY;
    }

    if (!entries)
    {
        return 0;
    }

    /* Check if entries is valid */
    for (int i = 0; i < nvec; ++i)
    {
        struct rt_pci_msix_entry *target = &entries[i];

        if (target->index >= entries_nr)
        {
            return -RT_EINVAL;
        }

        for (int j = i + 1; j < nvec; ++j)
        {
            /* Check duplicate */
            if (target->index == entries[j].index)
            {
                LOG_E("%s: msix entry[%d].index = entry[%d].index",
                        rt_dm_dev_get_name(&pdev->parent), i, j);

                return -RT_EINVAL;
            }
        }
    }

    return msix_capability_init(pdev, entries, nvec, affinities);
}