/*
 * Copyright (c) 2021 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */

#include "device.h"

#include <sys/types.h>
#include <lk/cpp.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <lk/list.h>
#include <lk/trace.h>
#include <lk/pow2.h>
#include <dev/bus/pci.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <platform/interrupts.h>

#define LOCAL_TRACE 0

#include "bus_mgr.h"
#include "bridge.h"

namespace pci {

device::device(pci_location_t loc, bus *bus) : loc_(loc), bus_(bus) {}

device::~device() {
    LTRACE;

    capability *cap;
    while ((cap = list_remove_head_type(&capability_list_, capability, node))) {
        delete cap;
    }
}

// probe the device, return a new device
status_t device::probe(pci_location_t loc, bus *parent_bus, device **out_device) {
    status_t err;

    *out_device = nullptr;

    // read the vendor id and make sure a device is present at this location
    uint16_t vendor_id;
    err = pci_read_config_half(loc, PCI_CONFIG_VENDOR_ID, &vendor_id);
    if (err != NO_ERROR) {
        return ERR_NOT_FOUND;
    }
    if (vendor_id == 0xffff) {
        return ERR_NOT_FOUND;
    }

    // read base and sub class
    uint8_t base_class;
    err = pci_read_config_byte(loc, PCI_CONFIG_CLASS_CODE_BASE, &base_class);
    if (err != NO_ERROR) {
        return ERR_NOT_FOUND;
    }
    uint8_t sub_class;
    err = pci_read_config_byte(loc, PCI_CONFIG_CLASS_CODE_SUB, &sub_class);
    if (err != NO_ERROR) {
        return ERR_NOT_FOUND;
    }

    // read header type (0 or 1)
    uint8_t header_type;
    err = pci_read_config_byte(loc, PCI_CONFIG_HEADER_TYPE, &header_type);
    if (err != NO_ERROR) {
        return ERR_NOT_FOUND;
    }

    header_type &= PCI_HEADER_TYPE_MASK;

    if (header_type != 0) {
        LTRACEF("type %d header on device we don't understand, skipping\n", header_type);
        return ERR_NOT_FOUND;
    }

    // if it's a bridge, we should not have been called
    if (base_class == 0x6) { // XXX replace with #define
        // bridge
        if (sub_class == 0x4) { // PCI-PCI bridge, normal decode
            LTRACEF("found bridge, error\n");
            return ERR_NOT_SUPPORTED;
        }
    }

    LTRACEF_LEVEL(2, "type %#hhx\n", header_type);

    // create a new device and pass it up
    device *d = new device(loc, parent_bus);

    // try to read in the basic config space for this device
    err = d->load_config();
    if (err < 0) {
        delete d;
        return err;
    }

    // save a copy of the BARs
    d->load_bars();

    // probe the device's capabilities
    d->probe_capabilities();

    // return the newly constructed device
    *out_device = d;

    return NO_ERROR;
}
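
// Illustrative sketch (not part of the original code): the bus scanning logic in
// bus_mgr is expected to call probe() for each function it discovers, roughly:
//
//   device *dev;
//   if (device::probe(loc, current_bus, &dev) == NO_ERROR) {
//       // the bus takes ownership of dev and links it into its device list
//   }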

void device::dump(size_t indent) {
    for (size_t i = 0; i < indent; i++) {
        printf(" ");
    }
    char str[14];
    printf("dev %s vid:pid %04hx:%04hx base:sub:intr %hhu:%hhu:%hhu %s%s\n",
            pci_loc_string(loc_, str), config_.vendor_id, config_.device_id,
            base_class(), sub_class(), interface(),
            has_msi() ? "msi " : "",
            has_msix() ? "msix " : "");
    for (size_t b = 0; b < countof(bars_); b++) {
        if (bars_[b].valid) {
            for (size_t i = 0; i < indent + 1; i++) {
                printf(" ");
            }
            pci_dump_bar(bars_ + b, b);
        }
    }
}

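// Enable IO, MEM, and bus master decoding on this device by setting the
// corresponding bits in the config space COMMAND register. Typically called
// once BARs have been assigned, so the device starts decoding them.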
status_t device::enable() {
    char str[14];
    LTRACEF("%s\n", pci_loc_string(loc(), str));

    uint16_t command;
    status_t err = pci_read_config_half(loc_, PCI_CONFIG_COMMAND, &command);
    if (err != NO_ERROR) {
        return err;
    }
    command |= PCI_COMMAND_IO_EN | PCI_COMMAND_MEM_EN | PCI_COMMAND_BUS_MASTER_EN;
    err = pci_write_config_half(loc_, PCI_CONFIG_COMMAND, command);
    if (err != NO_ERROR) {
        return err;
    }

    return NO_ERROR;
}

// walk the device's capability list, reading each one in and creating a sub object per capability
status_t device::probe_capabilities() {
    char str[14];
    LTRACEF("%s\n", pci_loc_string(loc(), str));

    // does this device have any capabilities?
    if ((config_.status & PCI_STATUS_NEW_CAPS) == 0) {
        // no capabilities, just move on
        return NO_ERROR;
    }

    status_t err;
    size_t cap_ptr = config_.type0.capabilities_ptr; // type 0 and 1 are at same offset
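
    // Capability list layout in config space (per the PCI spec): each entry is a
    // 1-byte capability ID followed by a 1-byte offset to the next entry; a next
    // pointer of 0 terminates the list.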
    for (;;) {
        if (cap_ptr == 0) {
            break;
        }

        // read the capability id
        uint8_t cap_id;
        err = pci_read_config_byte(loc(), cap_ptr, &cap_id);
        if (err != NO_ERROR) {
            return err;
        }

        LTRACEF("cap id %#x at offset %#zx\n", cap_id, cap_ptr);

        // we only handle a few kinds of capabilities at the moment
        capability *cap = new capability;
        if (!cap) {
            return ERR_NO_MEMORY;
        }
        cap->id = cap_id;
        cap->config_offset = cap_ptr;

        // add the cap to our list
        list_add_tail(&capability_list_, &cap->node);

        switch (cap_id) {
            case 0x5: { // MSI
                LTRACEF("MSI\n");
                if (init_msi_capability(cap) == NO_ERROR) {
                    msi_cap_ = cap;
                }
                break;
            }
            case 0x11: { // MSI-X
                LTRACEF("MSI-X\n");
                if (init_msix_capability(cap) == NO_ERROR) {
                    msix_cap_ = cap;
                }
                break;
            }
        }

        // read the next pointer
        uint8_t next_cap_ptr;
        err = pci_read_config_byte(loc(), cap_ptr + 1, &next_cap_ptr);
        if (err != NO_ERROR) {
            return err;
        }

        cap_ptr = next_cap_ptr;
    }

    return NO_ERROR;
}

status_t device::init_msi_capability(capability *cap) {
    LTRACE_ENTRY;

    DEBUG_ASSERT(cap->id == 0x5);

    // plain MSI
    uint32_t cap_buf[6];
    pci_read_config_word(loc(), cap->config_offset, &cap_buf[0]);
    pci_read_config_word(loc(), cap->config_offset + 4, &cap_buf[1]);
    pci_read_config_word(loc(), cap->config_offset + 8, &cap_buf[2]);
    pci_read_config_word(loc(), cap->config_offset + 12, &cap_buf[3]);
    pci_read_config_word(loc(), cap->config_offset + 16, &cap_buf[4]);
    pci_read_config_word(loc(), cap->config_offset + 20, &cap_buf[5]);
    //hexdump(cap_buf, sizeof(cap_buf));

    return NO_ERROR;
}
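
// Note: the init_*_capability() routines currently just read the raw capability
// registers back (the hexdump is commented out); the actual MSI message is
// programmed later, in allocate_msi().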

status_t device::init_msix_capability(capability *cap) {
    LTRACE_ENTRY;

    DEBUG_ASSERT(cap->id == 0x11);

    // MSI-X
    uint32_t cap_buf[3];
    pci_read_config_word(loc(), cap->config_offset, &cap_buf[0]);
    pci_read_config_word(loc(), cap->config_offset + 4, &cap_buf[1]);
    pci_read_config_word(loc(), cap->config_offset + 8, &cap_buf[2]);
    //hexdump(cap_buf, sizeof(cap_buf));

    return NO_ERROR;
}

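// Allocate a legacy (INTx) interrupt for this device, based on the interrupt line
// value that firmware (or the platform) programmed into config space, translated
// into the platform's interrupt vector space.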
status_t device::allocate_irq(uint *irq) {
    LTRACE_ENTRY;

    uint8_t interrupt_line;
    status_t err = pci_read_config_byte(loc(), PCI_CONFIG_INTERRUPT_LINE, &interrupt_line);
    if (err != NO_ERROR) return err;

    if (interrupt_line == 0) {
        return ERR_NO_RESOURCES;
    }

    // map the irq number in config space to platform vector space
    err = platform_pci_int_to_vector(interrupt_line, irq);
    return err;
}

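// MSI capability register layout (per the PCI spec), relative to the capability's
// offset in config space, as programmed below:
//   +0x0  capability ID / next pointer
//   +0x2  message control (bit 0 = MSI enable, bits 6:4 = multiple message enable,
//         bit 7 = 64-bit address capable)
//   +0x4  message address, low 32 bits
//   +0x8  message address, high 32 bits if 64-bit capable, otherwise message data
//   +0xc  message data (64-bit capable case)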
status_t device::allocate_msi(size_t num_requested, uint *msi_base) {
    LTRACE_ENTRY;

    DEBUG_ASSERT(num_requested == 1);

    if (!has_msi()) {
        return ERR_NOT_SUPPORTED;
    }

    DEBUG_ASSERT(msi_cap_ && msi_cap_->is_msi());

    // ask the platform for interrupts
    uint vector_base;
    status_t err = platform_allocate_interrupts(num_requested, 0, true, &vector_base);
    if (err != NO_ERROR) {
        return err;
    }

    // compute the MSI message to construct
    uint64_t msi_address = 0;
    uint16_t msi_data = 0;
    err = platform_compute_msi_values(vector_base, 0, true, &msi_address, &msi_data);
    if (err != NO_ERROR) {
        // TODO: return the allocated msi
        return err;
    }

    // program it into the capability
    const uint16_t cap_offset = msi_cap_->config_offset;

    uint16_t control;
    pci_read_config_half(loc(), cap_offset + 2, &control);
    pci_write_config_half(loc(), cap_offset + 2, control & ~(0x1)); // disable MSI
    pci_write_config_word(loc(), cap_offset + 4, msi_address & 0xffff'ffff); // lower 32bits
    if (control & (1<<7)) {
        // 64bit
        pci_write_config_word(loc(), cap_offset + 8, msi_address >> 32); // upper 32bits
        pci_write_config_half(loc(), cap_offset + 0xc, msi_data);
    } else {
        pci_write_config_half(loc(), cap_offset + 8, msi_data);
    }

    // set up the control register and enable it
    control = 1; // MSI enable = 1, multiple message enable = 0 (single vector); 64bit and masking capability bits are read-only
    pci_write_config_half(loc(), cap_offset + 2, control);

    // write it back to the pci config in the interrupt line offset
    pci_write_config_byte(loc(), PCI_CONFIG_INTERRUPT_LINE, vector_base);

    // pass back the allocated irq to the caller
    *msi_base = vector_base;

    return NO_ERROR;
}
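
// Illustrative sketch (not part of the original code): a driver would typically
// allocate an MSI and then hook it up through the LK platform interrupt API,
// along the lines of:
//
//   uint irq;
//   if (dev->allocate_msi(1, &irq) == NO_ERROR) {
//       register_int_handler(irq, &my_handler, my_arg); // my_handler/my_arg are hypothetical
//       unmask_interrupt(irq);
//   }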
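// Read and cache this device's BARs, sizing each one by writing all 1s and reading
// back which bits are hardwired to zero. Worked example: if a 32-bit memory BAR
// reads back 0xfffff000 after the write, the size is (~0xfffff000) + 1 = 0x1000 (4KB).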
status_t device::load_bars() {
    size_t num_bars;

    if (header_type() == 0) {
        num_bars = 6;
    } else if (header_type() == 1) {
        // type 1 only has 2 BARs, but they are in the same location as type 0,
        // so we can use the same code below
        num_bars = 2;
    } else {
        // type 2 header?
        return ERR_NOT_SUPPORTED;
    }

    // Disable IO and MEM decoding around BAR detection, as we fiddle with
    // BAR addresses themselves for length detection.
    // This behavior is recommended by the PCI Local Bus Specification.

    uint16_t command;
    pci_read_config_half(loc(), PCI_CONFIG_COMMAND, &command);
    pci_write_config_half(loc(), PCI_CONFIG_COMMAND, command & ~(PCI_COMMAND_IO_EN | PCI_COMMAND_MEM_EN));

    for (size_t i=0; i < num_bars; i++) {
        bars_[i] = {};
        uint64_t bar_addr = config_.type0.base_addresses[i];
        if (bar_addr & 0x1) {
            // io address
            bars_[i].io = true;
            bars_[i].prefetchable = false;
            bars_[i].size_64 = false;
            bars_[i].addr = bar_addr & ~0x3;

            // probe size by writing all 1s and seeing what bits are masked
            uint32_t size = 0;
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, 0xffff);
            pci_read_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, &size);
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, bars_[i].addr);

            // mask out bottom bits, invert and add 1 to compute size
            bars_[i].size = ((size & ~0b11) ^ 0xffff) + 1;

            bars_[i].valid = (bars_[i].size != 0);
        } else if ((bar_addr & 0b110) == 0b000) {
            // 32bit memory address
            bars_[i].io = false;
            bars_[i].prefetchable = bar_addr & (1<<3);
            bars_[i].size_64 = false;
            bars_[i].addr = bar_addr & ~0xf;

            // probe size by writing all 1s and seeing what bits are masked
            uint32_t size = 0;
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, 0xffffffff);
            pci_read_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, &size);
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, bars_[i].addr);

            // mask out bottom bits, invert and add 1 to compute size
            bars_[i].size = (~(size & ~0b1111)) + 1;

            bars_[i].valid = (bars_[i].size != 0);
        } else if ((bar_addr & 0b110) == 0b100) {
            // 64bit memory address
            if (i >= num_bars - 1) {
                // a 64bit BAR uses up two slots, so it can't
                // start at the last bar
                continue;
            }
            bars_[i].io = false;
            bars_[i].prefetchable = bar_addr & (1<<3);
            bars_[i].size_64 = true;
            bars_[i].addr = bar_addr & ~0xf;
            bars_[i].addr |= (uint64_t)config_.type0.base_addresses[i + 1] << 32;

            // probe size by writing all 1s and seeing what bits are masked
            uint64_t size;
            uint32_t size32 = 0;
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, 0xffffffff);
            pci_read_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, &size32);
            size = size32;
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4 + 4, 0xffffffff);
            pci_read_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4 + 4, &size32);
            size |= (uint64_t)size32 << 32;
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4, bars_[i].addr);
            pci_write_config_word(loc_, PCI_CONFIG_BASE_ADDRESSES + i * 4 + 4, bars_[i].addr >> 32);

            // mask out bottom bits, invert and add 1 to compute size
            bars_[i].size = (~(size & ~(uint64_t)0b1111)) + 1;

            bars_[i].valid = (bars_[i].size != 0);

            // mark the next entry as invalid
            i++;
            bars_[i] = {}; // clears the valid bit
        }
    }

    // Restore any IO and MEM decoding that was enabled before
    pci_write_config_half(loc(), PCI_CONFIG_COMMAND, command);

    return NO_ERROR;
}

status_t device::read_bars(pci_bar_t bar[6]) {
    // copy the cached bar information
    memcpy(bar, bars_, sizeof(bars_));
    return NO_ERROR;
}

status_t device::load_config() {
    status_t err = pci_read_config(loc_, &config_);
    return err;
}

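// Accumulate the total size and maximum alignment of this device's BARs, broken out
// by resource type (io, 32-bit mmio, 64-bit mmio, prefetchable variants). Alignments
// are stored as log2 values (shift amounts), derived from the rounded-up BAR size.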
status_t device::compute_bar_sizes(bar_sizes *sizes) {
    char str[14];
    LTRACEF("device at %s\n", pci_loc_string(loc(), str));

    // iterate through the bars on this device and accumulate the size
    // of all the bars of various types. also accumulate the maximum alignment
    for (auto i = 0; i < 6; i++) {
        const auto &bar = bars_[i];
        if (!bar.valid) {
            continue;
        }

        if (bar.io) {
            // io case
            sizes->io_size += ROUNDUP(bar.size, 16);
            if (sizes->io_align < 4) {
                sizes->io_align = 4;
            }
        } else if (bar.size_64 && bar.prefetchable) {
            // 64bit prefetchable mmio
            auto size = ROUNDUP(bar.size, PAGE_SIZE);
            auto align = __builtin_ctz(size);
            sizes->prefetchable64_size += size;
            if (sizes->prefetchable64_align < align) {
                sizes->prefetchable64_align = align;
            }
        } else if (bar.size_64) {
            // 64bit mmio
            auto size = ROUNDUP(bar.size, PAGE_SIZE);
            auto align = __builtin_ctz(size);
            sizes->mmio64_size += size;
            if (sizes->mmio64_align < align) {
                sizes->mmio64_align = align;
            }
        } else if (bar.prefetchable) {
            // 32bit prefetchable mmio
            auto size = ROUNDUP(bar.size, PAGE_SIZE);
            auto align = __builtin_ctz(size);
            sizes->prefetchable_size += size;
            if (sizes->prefetchable_align < align) {
                sizes->prefetchable_align = align;
            }
        } else {
            // 32bit mmio
            auto size = ROUNDUP(bar.size, PAGE_SIZE);
            auto align = __builtin_ctz(size);
            sizes->mmio_size += size;
            if (sizes->mmio_align < align) {
                sizes->mmio_align = align;
            }
        }
    }

    return NO_ERROR;
}
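// Build a list of bar_alloc_request records, one per valid BAR on this device.
// The bus/bridge allocation code is expected to walk this list, carve addresses
// out of its resource windows, and hand each one back via assign_resource().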
status_t device::get_bar_alloc_requests(list_node *bar_alloc_requests) {
    char str[14];
    LTRACEF("device at %s\n", pci_loc_string(loc(), str));

    DEBUG_ASSERT(bar_alloc_requests);

    // iterate through the valid bars on this device and emit an allocation
    // request for each one, describing its size, alignment, and type
    for (auto i = 0; i < 6; i++) {
        const auto &bar = bars_[i];
        if (!bar.valid) {
            continue;
        }

        auto request = new bar_alloc_request;
        *request = {};
        request->bridge = false;
        request->dev = this;
        request->bar_num = i;

        if (bar.io) {
            // io case
            request->size = ROUNDUP(bar.size, 16);
            request->align = 4;
            request->type = PCI_RESOURCE_IO_RANGE;
        } else if (bar.size_64) {
            // 64bit mmio
            auto size = ROUNDUP(bar.size, PAGE_SIZE);
            auto align = __builtin_ctz(size);
            request->size = size;
            request->align = align;
            request->type = PCI_RESOURCE_MMIO64_RANGE;
            request->prefetchable = bar.prefetchable;
        } else {
            // 32bit mmio
            auto size = ROUNDUP(bar.size, PAGE_SIZE);
            auto align = __builtin_ctz(size);
            request->size = size;
            request->align = align;
            request->type = PCI_RESOURCE_MMIO_RANGE;
            request->prefetchable = bar.prefetchable;
        }
        // add it to the list passed in
        list_add_tail(bar_alloc_requests, &request->node);
    }

    return NO_ERROR;
}

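// Program the BAR named by this request with the allocated address (both halves
// for a 64-bit BAR), then reload the cached config and BAR state to pick up the
// new values.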
status_t device::assign_resource(bar_alloc_request *request, uint64_t address) {
    char str[14];
    LTRACEF("device at %s resource addr %#llx request:\n", pci_loc_string(loc(), str), address);
    if (LOCAL_TRACE) {
        request->dump();
    }

    DEBUG_ASSERT(IS_ALIGNED(address, (1UL << request->align)));

    // Note: When assigning the resource, we don't bother setting the bottom bits
    // as those are hardwired per the spec.

    uint32_t temp;
    switch (request->type) {
        case PCI_RESOURCE_IO_RANGE:
            temp = (address & 0xfffc);
            pci_write_config_word(loc(), PCI_CONFIG_BASE_ADDRESSES + request->bar_num * 4, temp);
            break;
        case PCI_RESOURCE_MMIO_RANGE:
            temp = (address & 0xfffffff0);
            pci_write_config_word(loc(), PCI_CONFIG_BASE_ADDRESSES + request->bar_num * 4, temp);
            break;
        case PCI_RESOURCE_MMIO64_RANGE:
            temp = (address & 0xfffffff0);
            pci_write_config_word(loc(), PCI_CONFIG_BASE_ADDRESSES + request->bar_num * 4, temp);
            temp = address >> 32;
            pci_write_config_word(loc(), PCI_CONFIG_BASE_ADDRESSES + request->bar_num * 4 + 4, temp);
            break;
        default:
            panic("invalid request type %d\n", request->type);
    }

    load_config();
    load_bars();

    return NO_ERROR;
}

void device::bar_alloc_request::dump() {
    char str[14];
    if (bridge) {
        printf("BAR alloc request %p: bridge %s type %u (%s) pref %d size %#llx align %u\n",
                this, pci_loc_string(dev->loc(), str), type, pci_resource_type_to_str(type), prefetchable, size, align);
    } else {
        printf("BAR alloc request %p: device %s type %u (%s) pref %d size %#llx align %u bar %u\n",
                this, pci_loc_string(dev->loc(), str), type, pci_resource_type_to_str(type), prefetchable, size, align, bar_num);
    }
}

} // namespace pci