1/*
2 * xen/arch/arm/head.S
3 *
4 * Start-of-day code for an ARMv7-A with virt extensions.
5 *
6 * Tim Deegan <tim@xen.org>
7 * Copyright (c) 2011 Citrix Systems.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 */
19
20#include <asm/page.h>
21#include <asm/asm_defns.h>
22#include <asm/early_printk.h>
23
#define ZIMAGE_MAGIC_NUMBER 0x016f2818

/*
 * LPAE page-table descriptor attribute templates (low 12 bits of the
 * 64-bit descriptor).  Field breakdown is given per entry below.
 */
#define PT_PT     0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_MEM    0xf7d /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=0 P=1 */
#define PT_MEM_L3 0xf7f /* nG=1 AF=1 SH=11 AP=01 NS=1 ATTR=111 T=1 P=1 */
#define PT_DEV    0xe71 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=0 P=1 */
#define PT_DEV_L3 0xe73 /* nG=1 AF=1 SH=10 AP=01 NS=1 ATTR=100 T=1 P=1 */

/*
 * Split a PT_* template into two pieces so each fits in an ARM
 * 8-bit-rotated immediate and can be applied with a pair of orr's.
 */
#define PT_UPPER(x) (PT_##x & 0xf00)
#define PT_LOWER(x) (PT_##x & 0x0ff)

/* Pull in the platform's early-UART macros when early printk is enabled. */
#if (defined (CONFIG_EARLY_PRINTK)) && (defined (EARLY_PRINTK_INC))
#include EARLY_PRINTK_INC
#endif
38
39/*
40 * Common register usage in this file:
41 *   r0  -
42 *   r1  -
43 *   r2  -
44 *   r3  -
45 *   r4  -
46 *   r5  -
47 *   r6  - identity map in place
48 *   r7  - CPUID
49 *   r8  - DTB address (boot CPU only)
50 *   r9  - paddr(start)
51 *   r10 - phys offset
52 *   r11 - UART address
53 *   r12 - is_secondary_cpu
54 *   r13 - SP
55 *   r14 - LR
56 *   r15 - PC
57 */
/*
 * Macro to print a string to the UART, if there is one.
 * Clobbers r0-r3 (via puts).
 * Embeds the string inline and branches around it; the local labels
 * 98/99 allow the macro to be expanded many times in one file.
 */
#ifdef CONFIG_EARLY_PRINTK
#define PRINT(_s)       \
        adr   r0, 98f ; \
        bl    puts    ; \
        b     99f     ; \
98:     .asciz _s     ; \
        .align 2      ; \
99:
#else /* CONFIG_EARLY_PRINTK */
#define PRINT(s)
#endif /* !CONFIG_EARLY_PRINTK */
71
        .arm

        /*
         * This must be the very first address in the loaded image.
         * It should be linked at XEN_VIRT_START, and loaded at any
         * 4K-aligned address.  All of text+data+bss must fit in 2MB,
         * or the initial pagetable code below will need adjustment.
         */
GLOBAL(start)
        /*
         * zImage magic header, see:
         * http://www.simtec.co.uk/products/SWLINUX/files/booting_article.html#d0e309
         *
         * Eight NOPs keep the layout identical to a Linux zImage header.
         */
        .rept 8
        mov   r0, r0                 /* NOP */
        .endr
        b     past_zImage

        .word ZIMAGE_MAGIC_NUMBER    /* Magic numbers to help the loader */
        .word 0x00000000             /* absolute load/run zImage address or
                                      * 0 for PiC */
        .word (_end - start)         /* zImage end address */

past_zImage:
        cpsid aif                    /* Disable all interrupts */

        /* Save the bootloader arguments in less-clobberable registers */
        mov   r8, r2                 /* r8 := DTB base address */

        /* Find out where we are: link-time vaddr vs run-time paddr */
        ldr   r0, =start             /* r0 := vaddr (start) */
        adr   r9, start              /* r9  := paddr (start) */
        sub   r10, r9, r0            /* r10 := phys-offset */

        /* Using the DTB in the .dtb section? Override the bootloader's r2. */
#ifdef CONFIG_DTB_FILE
        ldr   r8, =_sdtb
        add   r8, r10                /* r8 := paddr(DTB) */
#endif

        mov   r12, #0                /* r12 := is_secondary_cpu */

        b     common_start
115
/*
 * Entry point for secondary CPUs.  Mirrors the boot-CPU path above but
 * sets r12 so the common code can skip boot-CPU-only work (BSS zeroing,
 * UART init, DTB handling).
 */
GLOBAL(init_secondary)
        cpsid aif                    /* Disable all interrupts */

        /* Find out where we are: link-time vaddr vs run-time paddr */
        ldr   r0, =start
        adr   r9, start              /* r9  := paddr (start) */
        sub   r10, r9, r0            /* r10 := phys-offset */

        mov   r12, #1                /* r12 := is_secondary_cpu */

        /* Fall through to common_start */
125
common_start:
        mov   r7, #0                 /* r7 := CPU ID. Initially zero until we
                                      * find that multiprocessor extensions are
                                      * present and the system is SMP */
        mrc   CP32(r1, MPIDR)
        tst   r1, #MPIDR_SMP         /* Multiprocessor extension supported? */
        beq   1f
        tst   r1, #MPIDR_UP          /* Uniprocessor system? */
        bne   1f
        bic   r7, r1, #(~MPIDR_HWID_MASK) /* Mask out flags to get CPU ID */
1:

        /*
         * Non-boot CPUs wait here until __cpu_up is ready for them:
         * spin until smp_up_cpu holds our CPU ID.  NOTE(review): the
         * wfe is presumably paired with a sev from the CPU that writes
         * smp_up_cpu — that code is outside this file.
         */
        teq   r12, #0
        beq   1f

        ldr   r0, =smp_up_cpu
        add   r0, r0, r10            /* Apply physical offset */
        dsb                          /* Order against prior memory accesses */
2:      ldr   r1, [r0]
        cmp   r1, r7                 /* Is it our turn yet? */
        beq   1f
        wfe                          /* No - sleep until the next event */
        b     2b
1:

#ifdef CONFIG_EARLY_PRINTK
        ldr   r11, =EARLY_UART_BASE_ADDRESS  /* r11 := UART base address */
        teq   r12, #0                /* Boot CPU sets up the UART too */
        bleq  init_uart
        PRINT("- CPU ")
        mov   r0, r7                 /* r0 := CPU ID for putn */
        bl    putn
        PRINT(" booting -\r\n")
#endif
161
        /* Check that this CPU has Hyp mode (ID_PFR1 virt-extensions field) */
        mrc   CP32(r0, ID_PFR1)
        and   r0, r0, #0xf000        /* Bits 12-15 define virt extensions */
        teq   r0, #0x1000            /* Must == 0x1 or may be incompatible */
        beq   1f
        PRINT("- CPU doesn't support the virtualization extensions -\r\n")
        b     fail
1:

        /* Check that we're already in Hyp mode - Xen cannot get there itself */
        mrs   r0, cpsr
        and   r0, r0, #0x1f          /* Mode is in the low 5 bits of CPSR */
        teq   r0, #0x1a              /* Hyp Mode? */
        beq   hyp

        /* OK, we're boned. */
        PRINT("- Xen must be entered in NS Hyp mode -\r\n")
        PRINT("- Please update the bootloader -\r\n")
        b     fail

hyp:    PRINT("- Xen starting in Hyp mode -\r\n")

        /* Zero BSS on the boot CPU to avoid nasty surprises */
        teq   r12, #0
        bne   skip_bss

        PRINT("- Zero BSS -\r\n")
        ldr   r0, =__bss_start       /* Load start & end of bss */
        ldr   r1, =__bss_end
        add   r0, r0, r10            /* Apply physical offset */
        add   r1, r1, r10

        mov   r2, #0
1:      str   r2, [r0], #4           /* Zero a word and advance */
        cmp   r0, r1
        blo   1b

skip_bss:
        PRINT("- Setting up control registers -\r\n")

        /*
         * Get processor specific proc info into r1
         * (__lookup_processor_type returns CPUID in r0, proc_info in r1,
         * or r1 == NULL when the CPU is not in the supported list).
         */
        bl    __lookup_processor_type
        teq   r1, #0
        bne   1f
        mov   r4, r0                 /* Stash CPUID: PRINT clobbers r0-r3 */
        PRINT("- Missing processor info: ")
        mov   r0, r4                 /* Restore CPUID for putn */
        bl    putn
        PRINT(" -\r\n")
        b     fail
1:

        /* Jump to cpu_init; proc_info holds a link-time function pointer */
        ldr   r1, [r1, #PROCINFO_cpu_init]  /* r1 := vaddr(init func) */
        adr   lr, cpu_init_done             /* Save return address */
        add   pc, r1, r10                   /* Call paddr(init func) */
218
cpu_init_done:
        /*
         * Set up memory attribute type tables.  Program both the
         * non-hyp (MAIR0/1) and hyp (HMAIR0/1) copies with the same
         * values.
         */
        ldr   r0, =MAIR0VAL
        ldr   r1, =MAIR1VAL
        mcr   CP32(r0, MAIR0)
        mcr   CP32(r1, MAIR1)
        mcr   CP32(r0, HMAIR0)
        mcr   CP32(r1, HMAIR1)

        /*
         * Set up the HTCR:
         * PT walks use Inner-Shareable accesses,
         * PT walks are write-back, write-allocate in both cache levels,
         * Full 32-bit address space goes through this table.
         */
        ldr   r0, =(TCR_RES1|TCR_SH0_IS|TCR_ORGN0_WBWA|TCR_IRGN0_WBWA|TCR_T0SZ(0))
        mcr   CP32(r0, HTCR)

        /*
         * Set up the HSCTLR:
         * Exceptions in LE ARM,
         * Low-latency IRQs disabled,
         * Write-implies-XN disabled (for now),
         * D-cache disabled (for now),
         * I-cache enabled,
         * Alignment checking enabled,
         * MMU translation disabled (for now).
         */
        ldr   r0, =(HSCTLR_BASE|SCTLR_A)
        mcr   CP32(r0, HSCTLR)
249
250        /*
251         * Rebuild the boot pagetable's first-level entries. The structure
252         * is described in mm.c.
253         *
254         * After the CPU enables paging it will add the fixmap mapping
255         * to these page tables, however this may clash with the 1:1
256         * mapping. So each CPU must rebuild the page tables here with
257         * the 1:1 in place.
258         */
259
260        /*
261         * If Xen is loaded at exactly XEN_VIRT_START then we don't
262         * need an additional 1:1 mapping, the virtual mapping will
263         * suffice.
264         */
265        cmp   r9, #XEN_VIRT_START
266        moveq r6, #1                 /* r6 := identity map now in place */
267        movne r6, #0                 /* r6 := identity map not yet in place */
268
269        /* Write Xen's PT's paddr into the HTTBR */
270        ldr   r4, =boot_pgtable
271        add   r4, r4, r10            /* r4 := paddr (boot_pagetable) */
272        mov   r5, #0                 /* r4:r5 is paddr (boot_pagetable) */
273        mcrr  CP64(r4, r5, HTTBR)
274
275        /* Setup boot_pgtable: */
276        ldr   r1, =boot_second
277        add   r1, r1, r10            /* r1 := paddr (boot_second) */
278
279        /* ... map boot_second in boot_pgtable[0] */
280        orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_second */
281        orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
282        mov   r3, #0x0
283        strd  r2, r3, [r4, #0]       /* Map it in slot 0 */
284
285        /* ... map of paddr(start) in boot_pgtable */
286        lsrs  r1, r9, #FIRST_SHIFT   /* Offset of base paddr in boot_pgtable */
287        beq   1f                     /* If it is in slot 0 then map in boot_second
288                                      * later on */
289        lsl   r2, r1, #FIRST_SHIFT   /* Base address for 1GB mapping */
290        orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
291        orr   r2, r2, #PT_LOWER(MEM)
292        lsl   r1, r1, #3             /* r1 := Slot offset */
293        strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
294        mov   r6, #1                 /* r6 := identity map now in place */
295
2961:      /* Setup boot_second: */
297        ldr   r4, =boot_second
298        add   r4, r4, r10            /* r4 := paddr (boot_second) */
299
300        ldr   r1, =boot_third
301        add   r1, r1, r10            /* r1 := paddr (boot_third) */
302
303        /* ... map boot_third in boot_second[1] */
304        orr   r2, r1, #PT_UPPER(PT)  /* r2:r3 := table map of boot_third */
305        orr   r2, r2, #PT_LOWER(PT)  /* (+ rights for linear PT) */
306        mov   r3, #0x0
307        strd  r2, r3, [r4, #8]       /* Map it in slot 1 */
308
309        /* ... map of paddr(start) in boot_second */
310        cmp   r6, #1                 /* r6 is set if already created */
311        beq   1f
312        lsr   r2, r9, #SECOND_SHIFT  /* Offset of base paddr in boot_second */
313        ldr   r3, =LPAE_ENTRY_MASK
314        and   r1, r2, r3
315        cmp   r1, #1
316        beq   virtphys_clash         /* It's in slot 1, which we cannot handle */
317
318        lsl   r2, r2, #SECOND_SHIFT  /* Base address for 2MB mapping */
319        orr   r2, r2, #PT_UPPER(MEM) /* r2:r3 := section map */
320        orr   r2, r2, #PT_LOWER(MEM)
321        mov   r3, #0x0
322        lsl   r1, r1, #3             /* r1 := Slot offset */
323        strd  r2, r3, [r4, r1]       /* Mapping of paddr(start) */
324        mov   r6, #1                 /* r6 := identity map now in place */
325
326        /* Setup boot_third: */
3271:      ldr   r4, =boot_third
328        add   r4, r4, r10            /* r4 := paddr (boot_third) */
329
330        lsr   r2, r9, #THIRD_SHIFT  /* Base address for 4K mapping */
331        lsl   r2, r2, #THIRD_SHIFT
332        orr   r2, r2, #PT_UPPER(MEM_L3) /* r2:r3 := map */
333        orr   r2, r2, #PT_LOWER(MEM_L3)
334        mov   r3, #0x0
335
336        /* ... map of vaddr(start) in boot_third */
337        mov   r1, #0
3381:      strd  r2, r3, [r4, r1]       /* Map vaddr(start) */
339        add   r2, r2, #PAGE_SIZE     /* Next page */
340        add   r1, r1, #8             /* Next slot */
341        cmp   r1, #(LPAE_ENTRIES<<3) /* 512*8-byte entries per page */
342        blo   1b
343
344        /*
345         * Defer fixmap and dtb mapping until after paging enabled, to
346         * avoid them clashing with the 1:1 mapping.
347         */
348
349        /* boot pagetable setup complete */
350
351        cmp   r6, #1                /* Did we manage to create an identity mapping ? */
352        beq   1f
353        PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
354        b     fail
355virtphys_clash:
356        /* Identity map clashes with boot_third, which we cannot handle yet */
357        PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
358        b     fail
359
3601:
361        PRINT("- Turning on paging -\r\n")
362
363        ldr   r1, =paging            /* Explicit vaddr, not RIP-relative */
364        mrc   CP32(r0, HSCTLR)
365        orr   r0, r0, #(SCTLR_M|SCTLR_C) /* Enable MMU and D-cache */
366        dsb                          /* Flush PTE writes and finish reads */
367        mcr   CP32(r0, HSCTLR)       /* now paging is enabled */
368        isb                          /* Now, flush the icache */
369        mov   pc, r1                 /* Get a proper vaddr into PC */
370paging:
371
372        /*
373         * Now we can install the fixmap and dtb mappings, since we
374         * don't need the 1:1 map any more
375         */
376        dsb
377#if defined(CONFIG_EARLY_PRINTK) /* Fixmap is only used by early printk */
378        /*
379         * Non-boot CPUs don't need to rebuild the fixmap itself, just
380         * the mapping from boot_second to xen_fixmap
381         */
382        teq   r12, #0
383        bne   1f
384
385        /* Add UART to the fixmap table */
386        ldr   r1, =xen_fixmap        /* r1 := vaddr (xen_fixmap) */
387        mov   r3, #0
388        lsr   r2, r11, #THIRD_SHIFT
389        lsl   r2, r2, #THIRD_SHIFT   /* 4K aligned paddr of UART */
390        orr   r2, r2, #PT_UPPER(DEV_L3)
391        orr   r2, r2, #PT_LOWER(DEV_L3) /* r2:r3 := 4K dev map including UART */
392        strd  r2, r3, [r1, #(FIXMAP_CONSOLE*8)] /* Map it in the first fixmap's slot */
3931:
394
395        /* Map fixmap into boot_second */
396        ldr   r1, =boot_second       /* r1 := vaddr (boot_second) */
397        ldr   r2, =xen_fixmap
398        add   r2, r2, r10            /* r2 := paddr (xen_fixmap) */
399        orr   r2, r2, #PT_UPPER(PT)
400        orr   r2, r2, #PT_LOWER(PT)  /* r2:r3 := table map of xen_fixmap */
401        ldr   r4, =FIXMAP_ADDR(0)
402        mov   r4, r4, lsr #(SECOND_SHIFT - 3)   /* r4 := Slot for FIXMAP(0) */
403        strd  r2, r3, [r1, r4]       /* Map it in the fixmap's slot */
404
405        /* Use a virtual address to access the UART. */
406        ldr   r11, =EARLY_UART_VIRTUAL_ADDRESS
407#endif
408
409        /*
410         * Flush the TLB in case the 1:1 mapping happens to clash with
411         * the virtual addresses used by the fixmap or DTB.
412         */
413        dsb                          /* Ensure any page table updates made above
414                                      * have occurred. */
415
416        isb
417        mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
418        dsb                          /* Ensure completion of TLB flush */
419        isb
420
421        PRINT("- Ready -\r\n")
422
423        /* The boot CPU should go straight into C now */
424        teq   r12, #0
425        beq   launch
426
427        /*
428         * Non-boot CPUs need to move on to the proper pagetables, which were
429         * setup in init_secondary_pagetables.
430         */
431
432        ldr   r4, =init_ttbr         /* VA of HTTBR value stashed by CPU 0 */
433        ldrd  r4, r5, [r4]           /* Actual value */
434        dsb
435        mcrr  CP64(r4, r5, HTTBR)
436        dsb
437        isb
438        mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
439        mcr   CP32(r0, ICIALLU)      /* Flush I-cache */
440        mcr   CP32(r0, BPIALL)       /* Flush branch predictor */
441        dsb                          /* Ensure completion of TLB+BP flush */
442        isb
443
/*
 * Set up the boot-time stack and jump into C.  Arguments for
 * start_xen/start_secondary are marshalled into r0-r2.
 */
launch:
        ldr   r0, =init_data
        add   r0, #INITINFO_stack    /* Find the boot-time stack */
        ldr   sp, [r0]
        add   sp, #STACK_SIZE        /* (which grows down from the top). */
        sub   sp, #CPUINFO_sizeof    /* Make room for CPU save record */
        mov   r0, r10                /* Marshal args: - phys_offset */
        mov   r1, r8                 /*               - DTB address */
        mov   r2, r7                 /*               - CPU ID */
        teq   r12, #0
        beq   start_xen              /* and disappear into the land of C */
        b     start_secondary        /* (to the appropriate entry point) */

/* Fail-stop: park the CPU in a low-power wait-for-event loop forever */
fail:   PRINT("- Boot failed -\r\n")
1:      wfe
        b     1b
461
462GLOBAL(_end_boot)
463
464/*
465 * Copy Xen to new location and switch TTBR
466 * r1:r0       ttbr
467 * r2          source address
468 * r3          destination address
469 * [sp]=>r4    length
470 *
471 * Source and destination must be word aligned, length is rounded up
472 * to a 16 byte boundary.
473 *
474 * MUST BE VERY CAREFUL when saving things to RAM over the copy
475 */
476ENTRY(relocate_xen)
477        push {r4,r5,r6,r7,r8,r9,r10,r11}
478
479        ldr   r4, [sp, #8*4]                /* Get 4th argument from stack */
480
481        /* Copy 16 bytes at a time using:
482         * r5:  counter
483         * r6:  data
484         * r7:  data
485         * r8:  data
486         * r9:  data
487         * r10: source
488         * r11: destination
489         */
490        mov   r5, r4
491        mov   r10, r2
492        mov   r11, r3
4931:      ldmia r10!, {r6, r7, r8, r9}
494        stmia r11!, {r6, r7, r8, r9}
495
496        subs  r5, r5, #16
497        bgt   1b
498
499        /* Flush destination from dcache using:
500         * r5: counter
501         * r6: step
502         * r7: vaddr
503         */
504        dsb        /* So the CPU issues all writes to the range */
505
506        mov   r5, r4
507        ldr   r6, =cacheline_bytes /* r6 := step */
508        ldr   r6, [r6]
509        mov   r7, r3
510
5111:      mcr   CP32(r7, DCCMVAC)
512
513        add   r7, r7, r6
514        subs  r5, r5, r6
515        bgt   1b
516
517        dsb                            /* Ensure the flushes happen before
518                                        * continuing */
519        isb                            /* Ensure synchronization with previous
520                                        * changes to text */
521        mcr   CP32(r0, TLBIALLH)       /* Flush hypervisor TLB */
522        mcr   CP32(r0, ICIALLU)        /* Flush I-cache */
523        mcr   CP32(r0, BPIALL)         /* Flush branch predictor */
524        dsb                            /* Ensure completion of TLB+BP flush */
525        isb
526
527        mcrr  CP64(r0, r1, HTTBR)
528
529        dsb                            /* ensure memory accesses do not cross
530                                        * over the TTBR0 write */
531        isb                            /* Ensure synchronization with previous
532                                        * changes to text */
533        mcr   CP32(r0, TLBIALLH)       /* Flush hypervisor TLB */
534        mcr   CP32(r0, ICIALLU)        /* Flush I-cache */
535        mcr   CP32(r0, BPIALL)         /* Flush branch predictor */
536        dsb                            /* Ensure completion of TLB+BP flush */
537        isb
538
539        pop {r4, r5,r6,r7,r8,r9,r10,r11}
540
541        mov pc, lr
542
#ifdef CONFIG_EARLY_PRINTK
/*
 * Bring up the UART.
 * r11: Early UART base address
 * Clobbers r0-r2
 */
init_uart:
#ifdef EARLY_PRINTK_INIT_UART
        early_uart_init r11, r1, r2
#endif
        adr   r0, 1f
        b     puts                  /* Jump to puts; puts returns to our caller */
1:      .asciz "- UART enabled -\r\n"
        .align 4

/*
 * Print early debug messages.
 * r0: Nul-terminated string to print.
 * r11: Early UART base address
 * Clobbers r0-r1
 */
puts:
        early_uart_ready r11, r1
        ldrb  r1, [r0], #1           /* Load next char */
        teq   r1, #0                 /* Exit on nul */
        moveq pc, lr
        early_uart_transmit r11, r1
        b puts

/*
 * Print a 32-bit number in hex, most-significant nybble first.
 * r0: Number to print.
 * r11: Early UART base address
 * Clobbers r0-r3
 */
putn:
        adr   r1, hex                /* r1 := nybble-to-char lookup table */
        mov   r3, #8                 /* r3 := nybbles remaining */
1:
        early_uart_ready r11, r2
        and   r2, r0, #0xf0000000    /* Mask off the top nybble */
        ldrb  r2, [r1, r2, lsr #28]  /* Convert to a char */
        early_uart_transmit r11, r2
        lsl   r0, #4                 /* Roll it through one nybble at a time */
        subs  r3, r3, #1
        bne   1b
        mov   pc, lr

hex:    .ascii "0123456789abcdef"
        .align 2

#else  /* CONFIG_EARLY_PRINTK */

/* Without early printk all the print helpers collapse into no-ops */
ENTRY(early_puts)
init_uart:
puts:
putn:   mov   pc, lr

#endif /* !CONFIG_EARLY_PRINTK */
602
/*
 * This provides a C-API version of __lookup_processor_type.
 * Called with the MMU on and virt == link addresses, so the
 * virt/phys offset passed in r10 is zero.
 */
ENTRY(lookup_processor_type)
        stmfd sp!, {r4, r10, lr}
        mov   r10, #0                   /* r10 := offset between virt&phys */
        bl    __lookup_processor_type
        mov r0, r1                      /* Return the proc_info pointer */
        ldmfd sp!, {r4, r10, pc}

/*
 *  Read processor ID register (CP#15, CR0), and Look up in the linker-built
 * supported processor list. Note that we can't use the absolute addresses for
 * the __proc_info lists since we aren't running with the MMU on (and therefore,
 * we are not in correct address space). We have to calculate the offset.
 *
 * r10: offset between virt&phys
 *
 * Returns:
 * r0: CPUID
 * r1: proc_info pointer (NULL if no match found)
 * Clobbers r2-r4
 */
__lookup_processor_type:
        mrc   CP32(r0, MIDR)                /* r0 := our cpu id */
        ldr   r1, = __proc_info_start
        add   r1, r1, r10                   /* r1 := paddr of table (start) */
        ldr   r2, = __proc_info_end
        add   r2, r2, r10                   /* r2 := paddr of table (end) */
1:      ldr   r3, [r1, #PROCINFO_cpu_mask]
        and   r4, r0, r3                    /* r4 := our cpu id with mask */
        ldr   r3, [r1, #PROCINFO_cpu_val]   /* r3 := cpu val in current proc info */
        teq   r4, r3
        beq   2f                            /* Match => exit, or try next proc info */
        add   r1, r1, #PROCINFO_sizeof
        cmp   r1, r2
        blo   1b
        /* We failed to find the proc_info, return NULL */
        mov   r1, #0
2:
        mov   pc, lr
642
643/*
644 * Local variables:
645 * mode: ASM
646 * indent-tabs-mode: nil
647 * End:
648 */
649