// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2015 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <debug.h>
#include <err.h>
#include <fbl/atomic.h>
#include <fbl/auto_lock.h>
#include <fbl/ref_ptr.h>
#include <reg.h>
#include <trace.h>

#include <arch.h>
#include <dev/display.h>
#include <dev/hw_rng.h>
#include <dev/interrupt.h>
#include <dev/power.h>
#include <dev/psci.h>
#include <dev/uart.h>
#include <kernel/cmdline.h>
#include <kernel/dpc.h>
#include <kernel/spinlock.h>
#include <lk/init.h>
#include <object/resource_dispatcher.h>
#include <vm/kstack.h>
#include <vm/physmap.h>
#include <vm/vm.h>

#include <mexec.h>
#include <platform.h>

#include <target.h>

#include <arch/arch_ops.h>
#include <arch/arm64.h>
#include <arch/arm64/mmu.h>
#include <arch/arm64/mp.h>
#include <arch/arm64/periphmap.h>
#include <arch/mp.h>

#include <vm/bootreserve.h>
#include <vm/vm_aspace.h>

#include <lib/console.h>
#include <lib/debuglog.h>
#include <lib/memory_limit.h>
#if WITH_PANIC_BACKTRACE
#include <kernel/thread.h>
#endif

#include <libzbi/zbi-cpp.h>
#include <pdev/pdev.h>
#include <zircon/boot/image.h>
#include <zircon/rights.h>
#include <zircon/syscalls/smc.h>
#include <zircon/types.h>

// Defined in start.S.
extern paddr_t kernel_entry_paddr;
extern paddr_t zbi_paddr;

static void* ramdisk_base;
static size_t ramdisk_size;

static zbi_nvram_t lastlog_nvram;

static uint cpu_cluster_count = 0;
static uint cpu_cluster_cpus[SMP_CPU_MAX_CLUSTERS] = {0};

static bool halt_on_panic = false;
static bool uart_disabled = false;

// all of the configured memory arenas from the zbi
// at the moment, only support 1 arena
static pmm_arena_info_t mem_arena = {
    /* .name */ "sdram",
    /* .flags */ 0,
    /* .priority */ 0,
    /* .base */ 0, // filled in by zbi
    /* .size */ 0, // filled in by zbi
};

// boot items to save for mexec
// TODO(voydanoff): more generic way of doing this that can be shared with PC platform
static uint8_t mexec_zbi[4096];
static size_t mexec_zbi_length = 0;

static volatile int panic_started;

static void halt_other_cpus(void) {
    static volatile int halted = 0;

    if (atomic_swap(&halted, 1) == 0) {
        // stop the other cpus
        printf("stopping other cpus\n");
        arch_mp_send_ipi(MP_IPI_TARGET_ALL_BUT_LOCAL, 0, MP_IPI_HALT);

        // spin for a while
        // TODO: find a better way to spin at this low level
        for (volatile int i = 0; i < 100000000; i++) {
            __asm volatile("nop");
        }
    }
}

void platform_panic_start(void) {
    arch_disable_ints();

    halt_other_cpus();

    if (atomic_swap(&panic_started, 1) == 0) {
        dlog_bluescreen_init();
    }
}

void* platform_get_ramdisk(size_t* size) {
    if (ramdisk_base) {
        *size = ramdisk_size;
        return ramdisk_base;
    } else {
        *size = 0;
        return nullptr;
    }
}

void platform_halt_cpu(void) {
    uint32_t result = psci_cpu_off();
    // should never have returned
    panic("psci_cpu_off returned %u\n", result);
}

void platform_halt_secondary_cpus(void) {
    // Ensure the current thread is pinned to the boot CPU.
    DEBUG_ASSERT(get_current_thread()->cpu_affinity == cpu_num_to_mask(BOOT_CPU_ID));

    // "Unplug" online secondary CPUs before halting them.
    cpu_mask_t primary = cpu_num_to_mask(BOOT_CPU_ID);
    cpu_mask_t mask = mp_get_online_mask() & ~primary;
    zx_status_t result = mp_unplug_cpu_mask(mask);
    DEBUG_ASSERT(result == ZX_OK);
}

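// Power on a secondary CPU via PSCI, pointing it at the kernel entry point
// captured by start.S. Returns ZX_OK only if the firmware accepted the
// power-on request.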
static zx_status_t platform_start_cpu(uint cluster, uint cpu) {
    // Issue memory barrier before starting to ensure previous stores will be visible to new CPU.
    smp_mb();

    uint32_t ret = psci_cpu_on(cluster, cpu, kernel_entry_paddr);
    dprintf(INFO, "Trying to start cpu %u:%u returned: %d\n", cluster, cpu, (int)ret);
    if (ret != 0) {
        return ZX_ERR_INTERNAL;
    }
    return ZX_OK;
}

static void platform_cpu_init(void) {
    for (uint cluster = 0; cluster < cpu_cluster_count; cluster++) {
        for (uint cpu = 0; cpu < cpu_cluster_cpus[cluster]; cpu++) {
            if (cluster != 0 || cpu != 0) {
                // create a stack for the cpu we're about to start
                zx_status_t status = arm64_create_secondary_stack(cluster, cpu);
                DEBUG_ASSERT(status == ZX_OK);

                // start the cpu
                status = platform_start_cpu(cluster, cpu);

                if (status != ZX_OK) {
                    // TODO(maniscalco): Is continuing really the right thing to do here?

                    // start failed, free the stack
                    zx_status_t status = arm64_free_secondary_stack(cluster, cpu);
                    DEBUG_ASSERT(status == ZX_OK);
                    continue;
                }

                // the cpu booted
                //
                // bootstrap thread is now responsible for freeing its stack
            }
        }
    }
}

static inline bool is_zbi_container(void* addr) {
    DEBUG_ASSERT(addr);

    zbi_header_t* item = (zbi_header_t*)addr;
    return item->type == ZBI_TYPE_CONTAINER;
}

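// Copy a boot item (header plus payload, padded out to ZBI alignment) into
// the static mexec_zbi buffer so it can later be appended to the image handed
// to the next kernel by platform_mexec_patch_zbi().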
static void save_mexec_zbi(zbi_header_t* item) {
    size_t length = ZBI_ALIGN(
        static_cast<uint32_t>(sizeof(zbi_header_t) + item->length));
    ASSERT(sizeof(mexec_zbi) - mexec_zbi_length >= length);

    memcpy(&mexec_zbi[mexec_zbi_length], item, length);
    mexec_zbi_length += length;
}

static void process_mem_range(const zbi_mem_range_t* mem_range) {
    switch (mem_range->type) {
    case ZBI_MEM_RANGE_RAM:
        if (mem_arena.size == 0) {
            mem_arena.base = mem_range->paddr;
            mem_arena.size = mem_range->length;
            dprintf(INFO, "mem_arena.base %#" PRIx64 " size %#" PRIx64 "\n", mem_arena.base,
                    mem_arena.size);
        } else {
            if (mem_range->paddr) {
                mem_arena.base = mem_range->paddr;
                dprintf(INFO, "overriding mem arena 0 base from FDT: %#zx\n", mem_arena.base);
            }
            // if mem_arena.base is already set, then just update the size
            mem_arena.size = mem_range->length;
            dprintf(INFO, "overriding mem arena 0 size from FDT: %#zx\n", mem_arena.size);
        }
        break;
    case ZBI_MEM_RANGE_PERIPHERAL: {
        auto status = add_periph_range(mem_range->paddr, mem_range->length);
        ASSERT(status == ZX_OK);
        break;
    }
    case ZBI_MEM_RANGE_RESERVED:
        dprintf(INFO, "boot reserve mem range: phys base %#" PRIx64 " length %#" PRIx64 "\n",
                mem_range->paddr, mem_range->length);
        boot_reserve_add_range(mem_range->paddr, mem_range->length);
        break;
    default:
        panic("bad mem_range->type in process_mem_range\n");
        break;
    }
}

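// Callback invoked for every item in the boot ZBI. Driver metadata, kernel
// driver, platform ID, memory config, CPU config, and NVRAM items are saved
// for mexec; cmdline, memory, CPU, and NVRAM items are also acted on here.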
static zbi_result_t process_zbi_item(zbi_header_t* item, void* payload, void* cookie) {
    if (ZBI_TYPE_DRV_METADATA(item->type)) {
        save_mexec_zbi(item);
        return ZBI_RESULT_OK;
    }
    switch (item->type) {
    case ZBI_TYPE_KERNEL_DRIVER:
    case ZBI_TYPE_PLATFORM_ID:
        // we don't process these here, but we need to save them for mexec
        save_mexec_zbi(item);
        break;
    case ZBI_TYPE_CMDLINE: {
        if (item->length < 1) {
            break;
        }
        char* contents = reinterpret_cast<char*>(payload);
        contents[item->length - 1] = '\0';
        cmdline_append(contents);
        break;
    }
    case ZBI_TYPE_MEM_CONFIG: {
        zbi_mem_range_t* mem_range = reinterpret_cast<zbi_mem_range_t*>(payload);
        uint32_t count = item->length / (uint32_t)sizeof(zbi_mem_range_t);
        for (uint32_t i = 0; i < count; i++) {
            process_mem_range(mem_range++);
        }
        save_mexec_zbi(item);
        break;
    }
    case ZBI_TYPE_CPU_CONFIG: {
        zbi_cpu_config_t* cpu_config = reinterpret_cast<zbi_cpu_config_t*>(payload);
        cpu_cluster_count = cpu_config->cluster_count;
        for (uint32_t i = 0; i < cpu_cluster_count; i++) {
            cpu_cluster_cpus[i] = cpu_config->clusters[i].cpu_count;
        }
        arch_init_cpu_map(cpu_cluster_count, cpu_cluster_cpus);
        save_mexec_zbi(item);
        break;
    }
    case ZBI_TYPE_NVRAM: {
        zbi_nvram_t* nvram = reinterpret_cast<zbi_nvram_t*>(payload);
        memcpy(&lastlog_nvram, nvram, sizeof(lastlog_nvram));
        dprintf(INFO, "boot reserve nvram range: phys base %#" PRIx64 " length %#" PRIx64 "\n",
                nvram->base, nvram->length);
        boot_reserve_add_range(nvram->base, nvram->length);
        save_mexec_zbi(item);
        break;
    }
    }

    return ZBI_RESULT_OK;
}

static void process_zbi(zbi_header_t* root) {
    DEBUG_ASSERT(root);
    zbi_result_t result;

    uint8_t* zbi_base = reinterpret_cast<uint8_t*>(root);
    zbi::Zbi image(zbi_base);

    // Make sure the image looks valid.
    result = image.Check(nullptr);
    if (result != ZBI_RESULT_OK) {
        // TODO(gkalsi): Print something informative here?
        return;
    }

    image.ForEach(process_zbi_item, nullptr);
}

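// Early platform bring-up: locate the ZBI handed over by the boot shim,
// reserve it (and any ZBI-described reserved ranges), process its items,
// start the kernel pdev drivers, and register the PMM arena(s).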
void platform_early_init(void) {
    // if the zbi_paddr variable is -1, it was not set
    // in start.S, so we are in a bad place.
    if (zbi_paddr == -1UL) {
        panic("no zbi_paddr!\n");
    }

    void* zbi_vaddr = paddr_to_physmap(zbi_paddr);

    // initialize the boot memory reservation system
    boot_reserve_init();

    if (zbi_vaddr && is_zbi_container(zbi_vaddr)) {
        zbi_header_t* header = (zbi_header_t*)zbi_vaddr;

        ramdisk_base = header;
        ramdisk_size = ROUNDUP(header->length + sizeof(*header), PAGE_SIZE);
    } else {
        panic("no bootdata!\n");
    }

    if (!ramdisk_base || !ramdisk_size) {
        panic("no ramdisk!\n");
    }

    zbi_header_t* zbi = reinterpret_cast<zbi_header_t*>(ramdisk_base);
    // walk the zbi structure and process all the items
    process_zbi(zbi);

    // is the cmdline option to bypass dlog set?
    dlog_bypass_init();

    // bring up kernel drivers after we have mapped our peripheral ranges
    pdev_init(zbi);

    // Serial port should be active now

    // Read cmdline after processing zbi, which may contain cmdline data.
    halt_on_panic = cmdline_get_bool("kernel.halt-on-panic", false);

    // Check if serial should be enabled
    const char* serial_mode = cmdline_get("kernel.serial");
    uart_disabled = (serial_mode != NULL && !strcmp(serial_mode, "none"));

    // add the ramdisk to the boot reserve memory list
    paddr_t ramdisk_start_phys = physmap_to_paddr(ramdisk_base);
    paddr_t ramdisk_end_phys = ramdisk_start_phys + ramdisk_size;
    dprintf(INFO, "reserving ramdisk phys range [%#" PRIx64 ", %#" PRIx64 "]\n",
            ramdisk_start_phys, ramdisk_end_phys - 1);
    boot_reserve_add_range(ramdisk_start_phys, ramdisk_size);

    // check if a memory limit was passed in via kernel.memory-limit-mb and
    // find memory ranges to use if one is found.
    zx_status_t status = memory_limit_init();
    if (status == ZX_OK) {
        // Figure out and add arenas based on the memory limit and our range of DRAM
        memory_limit_add_range(mem_arena.base, mem_arena.size, mem_arena);
        status = memory_limit_add_arenas(mem_arena);
    }

    // If no memory limit was found, or adding arenas from the range failed, then add
    // the existing global arena.
    if (status != ZX_OK) {
        dprintf(INFO, "memory limit lib returned an error (%d), falling back to default arena\n",
                status);
        pmm_add_arena(&mem_arena);
    }

    // tell the boot allocator to mark ranges we've reserved as off limits
    boot_reserve_wire();
}

void platform_init(void) {
    platform_cpu_init();
}

// after the fact create a region to reserve the peripheral map(s)
static void platform_init_postvm(uint level) {
    reserve_periph_ranges();
}

LK_INIT_HOOK(platform_postvm, platform_init_postvm, LK_INIT_LEVEL_VM);

void platform_dputs_thread(const char* str, size_t len) {
    if (uart_disabled) {
        return;
    }
    uart_puts(str, len, true, true);
}

void platform_dputs_irq(const char* str, size_t len) {
    if (uart_disabled) {
        return;
    }
    uart_puts(str, len, false, true);
}

int platform_dgetc(char* c, bool wait) {
    if (uart_disabled) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    int ret = uart_getc(wait);
    if (ret < 0)
        return ret;
    *c = static_cast<char>(ret);
    return 0;
}

void platform_pputc(char c) {
    if (uart_disabled) {
        return;
    }
    uart_pputc(c);
}

int platform_pgetc(char* c, bool wait) {
    if (uart_disabled) {
        return ZX_ERR_NOT_SUPPORTED;
    }
    int r = uart_pgetc();
    if (r < 0) {
        return r;
    }

    *c = static_cast<char>(r);
    return 0;
}

/* stub out the hardware rng entropy generator, which doesn't exist on this platform */
size_t hw_rng_get_entropy(void* buf, size_t len, bool block) {
    return 0;
}

/* no built in framebuffer */
zx_status_t display_get_info(struct display_info* info) {
    return ZX_ERR_NOT_FOUND;
}

void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason) {

    if (suggested_action == HALT_ACTION_REBOOT) {
        power_reboot(REBOOT_NORMAL);
        printf("reboot failed\n");
    } else if (suggested_action == HALT_ACTION_REBOOT_BOOTLOADER) {
        power_reboot(REBOOT_BOOTLOADER);
        printf("reboot-bootloader failed\n");
    } else if (suggested_action == HALT_ACTION_REBOOT_RECOVERY) {
        power_reboot(REBOOT_RECOVERY);
        printf("reboot-recovery failed\n");
    } else if (suggested_action == HALT_ACTION_SHUTDOWN) {
        power_shutdown();
    }

    if (reason == HALT_REASON_SW_PANIC) {
        thread_print_current_backtrace();
        dlog_bluescreen_halt();
        if (!halt_on_panic) {
            power_reboot(REBOOT_NORMAL);
            printf("reboot failed\n");
        }
#if ENABLE_PANIC_SHELL
        dprintf(ALWAYS, "CRASH: starting debug shell... (reason = %d)\n", reason);
        arch_disable_ints();
        panic_shell_start();
#endif // ENABLE_PANIC_SHELL
    }

    dprintf(ALWAYS, "HALT: spinning forever... (reason = %d)\n", reason);

    // catch all fallthrough cases
    arch_disable_ints();
    for (;;)
        ;
}

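// Layout of the crashlog stored at the base of the NVRAM region described by
// the ZBI_TYPE_NVRAM boot item:
//
//   [log_hdr_t][log text ... up to lastlog_nvram.length - sizeof(log_hdr_t)]
//
// nmagic/nlength hold the bitwise complements of magic/length so that a torn
// or missing write is detected (and the log rejected) on the next boot.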
typedef struct {
    //TODO: combine with x86 nvram crashlog handling
    //TODO: ECC for more robust crashlogs
    uint64_t magic;
    uint64_t length;
    uint64_t nmagic;
    uint64_t nlength;
} log_hdr_t;

#define NVRAM_MAGIC (0x6f8962d66b28504fULL)

size_t platform_stow_crashlog(void* log, size_t len) {
    size_t max = lastlog_nvram.length - sizeof(log_hdr_t);
    void* nvram = paddr_to_physmap(lastlog_nvram.base);
    if (nvram == NULL) {
        return 0;
    }

    if (log == NULL) {
        return max;
    }
    if (len > max) {
        len = max;
    }

    log_hdr_t hdr = {
        .magic = NVRAM_MAGIC,
        .length = len,
        .nmagic = ~NVRAM_MAGIC,
        .nlength = ~len,
    };
    memcpy(nvram, &hdr, sizeof(hdr));
    memcpy(static_cast<char*>(nvram) + sizeof(hdr), log, len);
    arch_clean_cache_range((uintptr_t)nvram, sizeof(hdr) + len);
    return len;
}

size_t platform_recover_crashlog(size_t len, void* cookie,
                                 void (*func)(const void* data, size_t, size_t len, void* cookie)) {
    size_t max = lastlog_nvram.length - sizeof(log_hdr_t);
    void* nvram = paddr_to_physmap(lastlog_nvram.base);
    if (nvram == NULL) {
        return 0;
    }
    log_hdr_t hdr;
    memcpy(&hdr, nvram, sizeof(hdr));
    if ((hdr.magic != NVRAM_MAGIC) || (hdr.length > max) ||
        (hdr.nmagic != ~NVRAM_MAGIC) || (hdr.nlength != ~hdr.length)) {
        printf("nvram-crashlog: bad header: %016lx %016lx %016lx %016lx\n",
               hdr.magic, hdr.length, hdr.nmagic, hdr.nlength);
        return 0;
    }
    if (len == 0) {
        return hdr.length;
    }
    if (len > hdr.length) {
        len = hdr.length;
    }
    func(static_cast<char*>(nvram) + sizeof(hdr), 0, len, cookie);

    // invalidate header so we don't get a stale crashlog
    // on future boots
    hdr.magic = 0;
    memcpy(nvram, &hdr, sizeof(hdr));
    return hdr.length;
}

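// Append the boot items captured in mexec_zbi (see save_mexec_zbi above) to
// the ZBI that will be handed to the next kernel during mexec.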
zx_status_t platform_mexec_patch_zbi(uint8_t* zbi, const size_t len) {
    size_t offset = 0;

    // copy certain boot items provided by the bootloader or boot shim
    // to the mexec zbi
    zbi::Zbi image(zbi, len);
    while (offset < mexec_zbi_length) {
        zbi_header_t* item = reinterpret_cast<zbi_header_t*>(mexec_zbi + offset);

        zbi_result_t status;
        status = image.AppendSection(item->length, item->type, item->extra,
                                     item->flags,
                                     reinterpret_cast<uint8_t*>(item + 1));

        if (status != ZBI_RESULT_OK)
            return ZX_ERR_INTERNAL;

        offset += ZBI_ALIGN(
            static_cast<uint32_t>(sizeof(zbi_header_t)) + item->length);
    }

    return ZX_OK;
}

void platform_mexec_prep(uintptr_t new_bootimage_addr, size_t new_bootimage_len) {
    DEBUG_ASSERT(!arch_ints_disabled());
    DEBUG_ASSERT(mp_get_online_mask() == cpu_num_to_mask(BOOT_CPU_ID));
}

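// Hand control to the next kernel. With interrupts disabled and only the boot
// CPU online, jump into the mexec assembly trampoline, passing the new boot
// image, the memmov_ops_t list, and the destination kernel entry address; if
// the new kernel is packaged as a ZBI container, the entry offset from its
// kernel header is applied to the destination first.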
void platform_mexec(mexec_asm_func mexec_assembly, memmov_ops_t* ops,
                    uintptr_t new_bootimage_addr, size_t new_bootimage_len,
                    uintptr_t entry64_addr) {
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(mp_get_online_mask() == cpu_num_to_mask(BOOT_CPU_ID));

    paddr_t kernel_src_phys = (paddr_t)ops[0].src;
    paddr_t kernel_dst_phys = (paddr_t)ops[0].dst;

    // check to see if the kernel is packaged as a zbi container
    zbi_header_t* header = (zbi_header_t*)paddr_to_physmap(kernel_src_phys);
    if (header[0].type == ZBI_TYPE_CONTAINER && header[1].type == ZBI_TYPE_KERNEL_ARM64) {
        zbi_kernel_t* kernel_header = (zbi_kernel_t*)&header[2];
        // add offset from kernel header to entry point
        kernel_dst_phys += kernel_header->entry;
    }
    // else just jump to beginning of kernel image

    mexec_assembly((uintptr_t)new_bootimage_addr, 0, 0, arm64_get_boot_el(), ops,
                   (void*)kernel_dst_phys);
}

bool platform_serial_enabled(void) {
    return !uart_disabled && uart_present();
}

bool platform_early_console_enabled() {
    return false;
}

// Initialize Resource system after the heap is initialized.
static void arm_resource_dispatcher_init_hook(unsigned int rl) {
    // 64 bit address space for MMIO on ARM64
    zx_status_t status = ResourceDispatcher::InitializeAllocator(ZX_RSRC_KIND_MMIO, 0,
                                                                 UINT64_MAX);
    if (status != ZX_OK) {
        printf("Resources: Failed to initialize MMIO allocator: %d\n", status);
    }
    // Set up IRQs based on values from the GIC
    status = ResourceDispatcher::InitializeAllocator(ZX_RSRC_KIND_IRQ,
                                                     interrupt_get_base_vector(),
                                                     interrupt_get_max_vector());
    if (status != ZX_OK) {
        printf("Resources: Failed to initialize IRQ allocator: %d\n", status);
    }
    // Set up SMC valid service call range
    status = ResourceDispatcher::InitializeAllocator(ZX_RSRC_KIND_SMC,
                                                     0,
                                                     ARM_SMC_SERVICE_CALL_NUM_MAX + 1);
    if (status != ZX_OK) {
        printf("Resources: Failed to initialize SMC allocator: %d\n", status);
    }
}

LK_INIT_HOOK(arm_resource_init, arm_resource_dispatcher_init_hook, LK_INIT_LEVEL_HEAP);