1 /*
2 * Copyright (c) 2006-2022, RT-Thread Development Team
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 *
6 * Change Logs:
7 * Date Author Notes
8 * 2022-08-25 GuEe-GUI first version
9 */
10
11 #include <drivers/pci_endpoint.h>
12
13 #define DBG_TAG "pci.ep"
14 #define DBG_LVL DBG_INFO
15 #include <rtdbg.h>
16
/* Global registry of PCI endpoint controllers; guarded by _ep_lock. */
static rt_list_t _ep_nodes = RT_LIST_OBJECT_INIT(_ep_nodes);
/* Spinlock protecting _ep_nodes and endpoint reference checks. */
static RT_DEFINE_SPINLOCK(_ep_lock);
19
rt_pci_ep_write_header(struct rt_pci_ep * ep,rt_uint8_t func_no,struct rt_pci_ep_header * hdr)20 rt_err_t rt_pci_ep_write_header(struct rt_pci_ep *ep, rt_uint8_t func_no,
21 struct rt_pci_ep_header *hdr)
22 {
23 rt_err_t err;
24
25 if (ep && ep->ops && hdr && func_no < ep->max_functions)
26 {
27 if (ep->ops->write_header)
28 {
29 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
30 err = ep->ops->write_header(ep, func_no, hdr);
31 rt_mutex_release(&ep->lock);
32 }
33 else
34 {
35 err = -RT_ENOSYS;
36 }
37 }
38 else
39 {
40 err = -RT_EINVAL;
41 }
42
43 return err;
44 }
45
rt_pci_ep_set_bar(struct rt_pci_ep * ep,rt_uint8_t func_no,struct rt_pci_ep_bar * bar,int bar_idx)46 rt_err_t rt_pci_ep_set_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
47 struct rt_pci_ep_bar *bar, int bar_idx)
48 {
49 rt_err_t err = RT_EOK;
50
51 if (ep && ep->ops && func_no < ep->max_functions && bar &&
52 bar_idx < PCI_STD_NUM_BARS)
53 {
54 struct rt_pci_bus_resource *bus_bar = &bar->bus;
55
56 if (bar_idx == (PCI_STD_NUM_BARS - 1) &&
57 (bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
58 {
59 err = -RT_EINVAL;
60 LOG_E("%s: Set BAR[%d] can't not 64bit", ep->name, bar_idx);
61 }
62
63 if (rt_upper_32_bits(bus_bar->size) &&
64 !(bus_bar->flags & PCIM_BAR_MEM_TYPE_64))
65 {
66 err = -RT_EINVAL;
67 LOG_E("%s: Set BAR[%d] size is no support 64bit", ep->name, bar_idx);
68 }
69
70 if ((bus_bar->flags & PCIM_BAR_SPACE_IO) &&
71 (bus_bar->flags & PCIM_BAR_IO_MASK))
72 {
73 err = -RT_EINVAL;
74 LOG_E("%s: Set BAR[%d] io flags is invalid", ep->name, bar_idx);
75 }
76
77 if (!err)
78 {
79 if (ep->ops->set_bar)
80 {
81 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
82 err = ep->ops->set_bar(ep, func_no, bar, bar_idx);
83 rt_mutex_release(&ep->lock);
84 }
85 else
86 {
87 err = -RT_ENOSYS;
88 }
89 }
90 }
91 else
92 {
93 err = -RT_EINVAL;
94 }
95
96 return err;
97 }
98
rt_pci_ep_clear_bar(struct rt_pci_ep * ep,rt_uint8_t func_no,struct rt_pci_ep_bar * bar,int bar_idx)99 rt_err_t rt_pci_ep_clear_bar(struct rt_pci_ep *ep, rt_uint8_t func_no,
100 struct rt_pci_ep_bar *bar, int bar_idx)
101 {
102 rt_err_t err;
103
104 if (ep && ep->ops && func_no < ep->max_functions && bar &&
105 bar_idx < PCI_STD_NUM_BARS)
106 {
107 if (ep->ops->clear_bar)
108 {
109 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
110 err = ep->ops->clear_bar(ep, func_no, bar, bar_idx);
111 rt_mutex_release(&ep->lock);
112 }
113 else
114 {
115 err = -RT_ENOSYS;
116 }
117 }
118 else
119 {
120 err = -RT_EINVAL;
121 }
122
123 return err;
124 }
125
rt_pci_ep_map_addr(struct rt_pci_ep * ep,rt_uint8_t func_no,rt_ubase_t addr,rt_uint64_t pci_addr,rt_size_t size)126 rt_err_t rt_pci_ep_map_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
127 rt_ubase_t addr, rt_uint64_t pci_addr, rt_size_t size)
128 {
129 rt_err_t err;
130
131 if (ep && ep->ops && func_no < ep->max_functions && size)
132 {
133 if (ep->ops->map_addr)
134 {
135 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
136 err = ep->ops->map_addr(ep, func_no, addr, pci_addr, size);
137 rt_mutex_release(&ep->lock);
138 }
139 else
140 {
141 err = -RT_ENOSYS;
142 }
143 }
144 else
145 {
146 err = -RT_EINVAL;
147 }
148
149 return err;
150 }
151
rt_pci_ep_unmap_addr(struct rt_pci_ep * ep,rt_uint8_t func_no,rt_ubase_t addr)152 rt_err_t rt_pci_ep_unmap_addr(struct rt_pci_ep *ep, rt_uint8_t func_no,
153 rt_ubase_t addr)
154 {
155 rt_err_t err;
156
157 if (ep && ep->ops && func_no < ep->max_functions)
158 {
159 if (ep->ops->unmap_addr)
160 {
161 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
162 err = ep->ops->unmap_addr(ep, func_no, addr);
163 rt_mutex_release(&ep->lock);
164 }
165 else
166 {
167 err = -RT_ENOSYS;
168 }
169 }
170 else
171 {
172 err = -RT_EINVAL;
173 }
174
175 return err;
176 }
177
rt_pci_ep_set_msi(struct rt_pci_ep * ep,rt_uint8_t func_no,unsigned irq_nr)178 rt_err_t rt_pci_ep_set_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
179 unsigned irq_nr)
180 {
181 rt_err_t err;
182
183 if (ep && ep->ops && func_no < ep->max_functions)
184 {
185 if (ep->ops->set_msix)
186 {
187 err = -RT_EINVAL;
188
189 for (int log2 = 0; log2 < 5; ++log2)
190 {
191 if (irq_nr <= (1 << log2))
192 {
193 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
194 err = ep->ops->set_msi(ep, func_no, log2);
195 rt_mutex_release(&ep->lock);
196 }
197 }
198 }
199 else
200 {
201 err = -RT_ENOSYS;
202 }
203 }
204 else
205 {
206 err = -RT_EINVAL;
207 }
208
209 return err;
210 }
211
rt_pci_ep_get_msi(struct rt_pci_ep * ep,rt_uint8_t func_no,unsigned * out_irq_nr)212 rt_err_t rt_pci_ep_get_msi(struct rt_pci_ep *ep, rt_uint8_t func_no,
213 unsigned *out_irq_nr)
214 {
215 rt_err_t err;
216
217 if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
218 {
219 if (ep->ops->get_msi)
220 {
221 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
222 err = ep->ops->get_msi(ep, func_no, out_irq_nr);
223 rt_mutex_release(&ep->lock);
224 }
225 else
226 {
227 err = -RT_ENOSYS;
228 }
229 }
230 else
231 {
232 err = -RT_EINVAL;
233 }
234
235 return err;
236 }
237
rt_pci_ep_set_msix(struct rt_pci_ep * ep,rt_uint8_t func_no,unsigned irq_nr,int bar_idx,rt_off_t offset)238 rt_err_t rt_pci_ep_set_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
239 unsigned irq_nr, int bar_idx, rt_off_t offset)
240 {
241 rt_err_t err;
242
243 if (ep && ep->ops && func_no < ep->max_functions && irq_nr < 2048 &&
244 bar_idx < PCI_STD_NUM_BARS)
245 {
246 if (ep->ops->set_msix)
247 {
248 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
249 err = ep->ops->set_msix(ep, func_no, irq_nr, bar_idx, offset);
250 rt_mutex_release(&ep->lock);
251 }
252 else
253 {
254 err = -RT_ENOSYS;
255 }
256 }
257 else
258 {
259 err = -RT_EINVAL;
260 }
261
262 return err;
263 }
264
rt_pci_ep_get_msix(struct rt_pci_ep * ep,rt_uint8_t func_no,unsigned * out_irq_nr)265 rt_err_t rt_pci_ep_get_msix(struct rt_pci_ep *ep, rt_uint8_t func_no,
266 unsigned *out_irq_nr)
267 {
268 rt_err_t err;
269
270 if (ep && ep->ops && func_no < ep->max_functions && out_irq_nr)
271 {
272 if (ep->ops->get_msix)
273 {
274 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
275 err = ep->ops->get_msix(ep, func_no, out_irq_nr);
276 rt_mutex_release(&ep->lock);
277 }
278 else
279 {
280 err = -RT_ENOSYS;
281 }
282 }
283 else
284 {
285 err = -RT_EINVAL;
286 }
287
288 return err;
289 }
290
rt_pci_ep_raise_irq(struct rt_pci_ep * ep,rt_uint8_t func_no,enum rt_pci_ep_irq type,unsigned irq)291 rt_err_t rt_pci_ep_raise_irq(struct rt_pci_ep *ep, rt_uint8_t func_no,
292 enum rt_pci_ep_irq type, unsigned irq)
293 {
294 rt_err_t err;
295
296 if (ep && ep->ops && func_no < ep->max_functions)
297 {
298 if (ep->ops->raise_irq)
299 {
300 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
301 err = ep->ops->raise_irq(ep, func_no, type, irq);
302 rt_mutex_release(&ep->lock);
303 }
304 else
305 {
306 err = -RT_ENOSYS;
307 }
308 }
309 else
310 {
311 err = -RT_EINVAL;
312 }
313
314 return err;
315 }
316
rt_pci_ep_start(struct rt_pci_ep * ep)317 rt_err_t rt_pci_ep_start(struct rt_pci_ep *ep)
318 {
319 rt_err_t err;
320
321 if (ep && ep->ops)
322 {
323 if (ep->ops->start)
324 {
325 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
326 err = ep->ops->start(ep);
327 rt_mutex_release(&ep->lock);
328 }
329 else
330 {
331 err = -RT_ENOSYS;
332 }
333 }
334 else
335 {
336 err = -RT_EINVAL;
337 }
338
339 return err;
340 }
341
rt_pci_ep_stop(struct rt_pci_ep * ep)342 rt_err_t rt_pci_ep_stop(struct rt_pci_ep *ep)
343 {
344 rt_err_t err;
345
346 if (ep && ep->ops)
347 {
348 if (ep->ops->stop)
349 {
350 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
351 err = ep->ops->stop(ep);
352 rt_mutex_release(&ep->lock);
353 }
354 else
355 {
356 err = -RT_ENOSYS;
357 }
358 }
359 else
360 {
361 err = -RT_EINVAL;
362 }
363
364 return err;
365 }
366
rt_pci_ep_register(struct rt_pci_ep * ep)367 rt_err_t rt_pci_ep_register(struct rt_pci_ep *ep)
368 {
369 rt_ubase_t level;
370
371 if (!ep || !ep->ops)
372 {
373 return -RT_EINVAL;
374 }
375
376 rt_list_init(&ep->list);
377 rt_ref_init(&ep->ref);
378
379 rt_list_init(&ep->epf_nodes);
380 rt_mutex_init(&ep->lock, ep->name, RT_IPC_FLAG_PRIO);
381
382 level = rt_spin_lock_irqsave(&_ep_lock);
383 rt_list_insert_before(&_ep_nodes, &ep->list);
384 rt_spin_unlock_irqrestore(&_ep_lock, level);
385
386 return RT_EOK;
387 }
388
rt_pci_ep_unregister(struct rt_pci_ep * ep)389 rt_err_t rt_pci_ep_unregister(struct rt_pci_ep *ep)
390 {
391 rt_ubase_t level;
392 rt_err_t err = RT_EOK;
393
394 if (!ep)
395 {
396 return -RT_EINVAL;
397 }
398
399 level = rt_spin_lock_irqsave(&_ep_lock);
400
401 if (rt_ref_read(&ep->ref) > 1)
402 {
403 err = -RT_EBUSY;
404 }
405 else
406 {
407 rt_list_remove(&ep->list);
408 rt_mutex_detach(&ep->lock);
409 }
410
411 rt_spin_unlock_irqrestore(&_ep_lock, level);
412
413 return err;
414 }
415
rt_pci_ep_add_epf(struct rt_pci_ep * ep,struct rt_pci_epf * epf)416 rt_err_t rt_pci_ep_add_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
417 {
418 rt_err_t err = RT_EOK;
419
420 if (!ep || !epf || !epf->name)
421 {
422 return -RT_EINVAL;
423 }
424
425 if (epf->func_no > ep->max_functions - 1)
426 {
427 LOG_E("%s function No(%d) > %s max function No(%d - 1)",
428 epf->name, epf->func_no, ep->name, ep->max_functions);
429
430 return -RT_EINVAL;
431 }
432
433 epf->ep = ep;
434 rt_list_init(&epf->list);
435
436 rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
437
438 if (!rt_bitmap_test_bit(ep->functions_map, epf->func_no))
439 {
440 rt_bitmap_set_bit(ep->functions_map, epf->func_no);
441 rt_list_insert_before(&ep->epf_nodes, &epf->list);
442 }
443 else
444 {
445 err = -RT_EINVAL;
446 LOG_E("%s function No(%d) is repeating", epf->name, epf->func_no);
447 }
448
449 rt_mutex_release(&ep->lock);
450
451 return err;
452 }
453
/**
 * Detach an endpoint function (EPF) from its endpoint controller.
 *
 * Frees the function number in the controller's bitmap and unlinks the EPF
 * from the controller's EPF list, under the endpoint lock.
 *
 * @param ep endpoint controller
 * @param epf endpoint function previously added with rt_pci_ep_add_epf()
 *
 * @return RT_EOK on success, -RT_EINVAL on bad arguments.
 */
rt_err_t rt_pci_ep_remove_epf(struct rt_pci_ep *ep, struct rt_pci_epf *epf)
{
    if (!ep || !epf)
    {
        return -RT_EINVAL;
    }

    rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
    rt_bitmap_clear_bit(ep->functions_map, epf->func_no);
    rt_list_remove(&epf->list);
    rt_mutex_release(&ep->lock);

    return RT_EOK;
}
468
rt_pci_ep_get(const char * name)469 struct rt_pci_ep *rt_pci_ep_get(const char *name)
470 {
471 rt_ubase_t level;
472 struct rt_pci_ep *ep = RT_NULL, *ep_tmp;
473
474 level = rt_spin_lock_irqsave(&_ep_lock);
475
476 rt_list_for_each_entry(ep_tmp, &_ep_nodes, list)
477 {
478 if (!name || !rt_strcmp(ep_tmp->name, name))
479 {
480 ep = ep_tmp;
481 rt_ref_get(&ep->ref);
482 break;
483 }
484 }
485
486 rt_spin_unlock_irqrestore(&_ep_lock, level);
487
488 return ep;
489 }
490
/* Final-reference callback for rt_pci_ep_put(): unregisters the endpoint
 * once the last reference is dropped. */
static void pci_ep_release(struct rt_ref *ref)
{
    struct rt_pci_ep *ep = rt_container_of(ref, struct rt_pci_ep, ref);

    rt_pci_ep_unregister(ep);
}
497
rt_pci_ep_put(struct rt_pci_ep * ep)498 void rt_pci_ep_put(struct rt_pci_ep *ep)
499 {
500 if (ep)
501 {
502 rt_ref_put(&ep->ref, &pci_ep_release);
503 }
504 }
505