/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2019, Arm Limited. All rights reserved.
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <platform_config.h>

/* size_t __get_core_pos(void); */
FUNC __get_core_pos , : , .identity_map
	read_mpidr r0
	b	get_core_pos_mpidr
END_FUNC __get_core_pos

/* size_t get_core_pos_mpidr(uint32_t mpidr); */
/* Let platforms override this if needed */
WEAK_FUNC get_core_pos_mpidr , :
	mov	r3, r0

	/*
	 * Shift the MPIDR value if it is not already shifted. Using a
	 * logical shift ensures that AFF0 is filled with zeroes. This is
	 * necessary even when CFG_CORE_THREAD_SHIFT is 0, because the MT
	 * bit can be set on single-threaded systems, where all the AFF0
	 * values are zero.
	 */
	tst	r0, #MPIDR_MT_MASK
	lsleq	r3, r0, #MPIDR_AFFINITY_BITS
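
	/*
	 * Worked example (values for illustration only), assuming the
	 * 8-bit affinity fields from arm.h: a non-MT MPIDR of 0x80000101
	 * (cluster 1, core 1) is shifted to 0x00010100, matching the
	 * layout of an MT part that reports 0x81010100 for thread 0 of
	 * the same core.
	 */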

	/*
	 * At this point the MPIDR layout is always shifted so it looks as
	 * follows: AFF2 -> cluster, AFF1 -> core, AFF0 -> thread. A C
	 * sketch of the full calculation follows this function.
	 */
#if CFG_CORE_THREAD_SHIFT == 0
	/* Calculate CorePos = (ClusterId * (cores/cluster)) + CoreId */
	ubfx	r0, r3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
	ubfx	r1, r3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
	add	r0, r0, r1, LSL #(CFG_CORE_CLUSTER_SHIFT)
#else
	/*
	 * Calculate CorePos =
	 * ((ClusterId * (cores/cluster)) + CoreId) * (threads/core) + ThreadId
	 */
	ubfx	r0, r3, #MPIDR_AFF0_SHIFT, #MPIDR_AFFINITY_BITS
	ubfx	r1, r3, #MPIDR_AFF1_SHIFT, #MPIDR_AFFINITY_BITS
	ubfx	r2, r3, #MPIDR_AFF2_SHIFT, #MPIDR_AFFINITY_BITS
	add	r1, r1, r2, LSL #(CFG_CORE_CLUSTER_SHIFT)
	add	r0, r0, r1, LSL #(CFG_CORE_THREAD_SHIFT)
#endif

	bx	lr
END_FUNC get_core_pos_mpidr
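
/*
 * A minimal C sketch of the calculation above (illustration only, not
 * part of the build), assuming the 8-bit affinity fields and the
 * MPIDR_AFF*_SHIFT constants from arm.h:
 *
 *	size_t core_pos(uint32_t mpidr)
 *	{
 *		uint32_t thread = (mpidr >> MPIDR_AFF0_SHIFT) & 0xff;
 *		uint32_t core = (mpidr >> MPIDR_AFF1_SHIFT) & 0xff;
 *		uint32_t cluster = (mpidr >> MPIDR_AFF2_SHIFT) & 0xff;
 *
 *	#if CFG_CORE_THREAD_SHIFT == 0
 *		return (cluster << CFG_CORE_CLUSTER_SHIFT) + core;
 *	#else
 *		return (((cluster << CFG_CORE_CLUSTER_SHIFT) + core) <<
 *			CFG_CORE_THREAD_SHIFT) + thread;
 *	#endif
 *	}
 */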

/*
 * uint32_t temp_set_mode(int cpu_mode)
 *   returns the CPSR value to be set
 */
LOCAL_FUNC temp_set_mode , :
	mov	r1, r0
	cmp	r1, #CPSR_MODE_USR	/* update mode: usr -> sys */
	moveq	r1, #CPSR_MODE_SYS
	cpsid	aif			/* disable interrupts */
	mrs	r0, cpsr		/* get cpsr with interrupts disabled */
	bic	r0, #CPSR_MODE_MASK	/* clear mode */
	orr	r0, r1			/* set expected mode */
	bx	lr
END_FUNC temp_set_mode
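
/*
 * In C terms, temp_set_mode() behaves roughly as follows (sketch for
 * illustration only; read_cpsr() stands in for the mrs above):
 *
 *	uint32_t temp_set_mode(int cpu_mode)
 *	{
 *		if (cpu_mode == CPSR_MODE_USR)
 *			cpu_mode = CPSR_MODE_SYS; // SYS shares sp/lr with USR
 *		// interrupts are masked before the CPSR is read
 *		return (read_cpsr() & ~CPSR_MODE_MASK) | cpu_mode;
 *	}
 */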

/* uint32_t read_mode_sp(int cpu_mode) */
FUNC read_mode_sp , :
	push	{r4, lr}
UNWIND(	.save	{r4, lr})
	mrs	r4, cpsr		/* save cpsr */
	bl	temp_set_mode
	msr	cpsr, r0		/* set the new mode */
	mov	r0, sp			/* get the function result */
	msr	cpsr, r4		/* back to the old mode */
	pop	{r4, pc}
END_FUNC read_mode_sp

/* uint32_t read_mode_lr(int cpu_mode) */
FUNC read_mode_lr , :
	push	{r4, lr}
UNWIND(	.save	{r4, lr})
	mrs	r4, cpsr		/* save cpsr */
	bl	temp_set_mode
	msr	cpsr, r0		/* set the new mode */
	mov	r0, lr			/* get the function result */
	msr	cpsr, r4		/* back to the old mode */
	pop	{r4, pc}
END_FUNC read_mode_lr
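
/*
 * Hypothetical usage (for illustration only): read_mode_sp(CPSR_MODE_ABT)
 * briefly enters Abort mode to read the banked sp_abt and then restores
 * the caller's mode, which is useful when dumping the state of another
 * mode, e.g. from an exception handler.
 */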

/* void wait_cycles(unsigned long cycles) */
FUNC wait_cycles , :
	/* Divide by 4 since each loop iteration takes 4 cycles to complete */
	lsrs	r0, r0, #2
	bxeq	lr
loop:
	subs	r0, r0, #1
	nop
	bne	loop

	bx	lr
END_FUNC wait_cycles
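
/*
 * The divide-by-4 above assumes one iteration (subs + nop + taken bne)
 * costs roughly four cycles; the exact count is core dependent, so the
 * resulting delay is approximate.
 */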