1/*
2 * Copyright 2018-2021 NXP
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 *
6 */
7
8#include <asm_macros.S>
9#include <cortex_a53.h>
10#include <drivers/console.h>
11#include <lib/cpus/aarch64/cortex_a72.h>
12
13#include <platform_def.h>
14
15
16	.globl	plat_crash_console_init
17	.globl	plat_crash_console_putc
18	.globl	plat_crash_console_flush
19	.globl  plat_core_pos
20	.globl  plat_my_core_pos
21	.globl  plat_core_mask
22	.globl  plat_my_core_mask
23	.globl  plat_core_pos_by_mpidr
24	.globl _disable_ldstr_pfetch_A53
25	.globl _disable_ldstr_pfetch_A72
26	.global	_set_smmu_pagesz_64
27
	/* int plat_crash_console_init(void)
	 * Initialize the crash console without a C runtime, so a crash
	 * report can be printed.
	 * Default implementation: switch the console framework to crash
	 * mode so serial consoles marked valid for the crash state become
	 * active again.
	 * NOTE: this only works for crashes that occur after a normal
	 * console has been registered with the console framework. To
	 * debug earlier crashes, the platform has to override these
	 * functions with an implementation that initializes a console
	 * driver with hardcoded parameters. See docs/porting-guide.rst
	 * for more information.
	 * Clobber list : x0 - x4
	 */
func plat_crash_console_init
	/* Keep the return address in a scratch register rather than on
	 * the stack: the crash path must not assume the stack is usable.
	 * x4 is within the documented clobber list (x0 - x4).
	 */
	mov	x4, x30
	mov	x0, #CONSOLE_FLAG_CRASH
	bl	console_switch_state	/* activate crash-state consoles */
	mov	x0, #1			/* report success to the caller */
	ret	x4
endfunc plat_crash_console_init
53
	/* void plat_crash_console_putc(int character)
	 * Output through the normal console framework by default.
	 * In: x0 = character. Tail-calls console_putc, so console_putc's
	 * return value and clobbers pass straight through to the caller.
	 */
func plat_crash_console_putc
	b	console_putc
endfunc plat_crash_console_putc
60
	/* void plat_crash_console_flush(void)
	 * Flush the normal console by default.
	 * Tail-calls console_flush; its return value and clobbers pass
	 * straight through to the caller.
	 */
func plat_crash_console_flush
	b	console_flush
endfunc plat_crash_console_flush
67
/* This function implements a part of the critical interface between the psci
 * generic layer and the platform that allows the former to query the platform
 * to convert an MPIDR (in x0) to a unique linear index. An error code (-1) is
 * returned in case the MPIDR is invalid.
 * Implemented as a tail-call: plat_core_mask below does the affinity-field
 * extraction and range checking.
 */
func plat_core_pos_by_mpidr

	b	plat_core_pos

endfunc plat_core_pos_by_mpidr
78
79#if (SYMMETRICAL_CLUSTERS)
/* unsigned int plat_my_core_mask(void)
 * Generate the mask bit for the calling core: read this core's own
 * MPIDR_EL1 into x0 and tail-call plat_core_mask.
 * Clobbers the same registers as plat_core_mask (w0-w2).
 */
func plat_my_core_mask
	mrs	x0, MPIDR_EL1
	b	plat_core_mask
endfunc plat_my_core_mask
87
/* unsigned int plat_core_mask(u_register_t mpidr)
 * Return a single-bit, lsb-based mask for the core named by the mpidr
 * in x0, or 0 if the mpidr does not name a valid core.
 *
 * SoC core = ((cluster * CORES_PER_CLUSTER) + core)
 * mask     = (1 << SoC core)
 *
 * In:      x0 = mpidr
 * Out:     w0 = mask (0 on invalid mpidr)
 * Clobber: w0, w1, w2
 */
func plat_core_mask
	/* w1 = Aff1 (cluster), w2 = Aff0 (core within cluster) */
	ubfx	w1, w0, #8, #8
	ubfx	w2, w0, #0, #8

	/* assume invalid input: mask of 0 */
	mov	w0, wzr

	/* range-check the cluster and core numbers */
	cmp	w1, #NUMBER_OF_CLUSTERS
	b.ge	1f
	cmp	w2, #CORES_PER_CLUSTER
	b.ge	1f

	/* w2 = linear core number, then shift a 1 into position */
	mov	w0, #CORES_PER_CLUSTER
	madd	w2, w1, w0, w2
	mov	w0, #1
	lsl	w0, w0, w2
1:
	ret
endfunc plat_core_mask
119
120/*
121 * unsigned int plat_my_core_pos(void)
122 *  generate a linear core number for this core
123 */
124func plat_my_core_pos
125	mrs	x0, MPIDR_EL1
126	b	plat_core_pos
127endfunc plat_my_core_pos
128
129/*
130 * unsigned int plat_core_pos(u_register_t mpidr)
131 * Generate a linear core number for the core specified by mpidr.
132 *
133 * SoC core = ((cluster * cpu_per_cluster) + core)
134 * Returns -1 if mpidr invalid
135 */
136func plat_core_pos
137	mov	w1, wzr
138	mov	w2, wzr
139	bfxil	w1, w0, #8, #8	/* extract cluster */
140	bfxil	w2, w0, #0, #8	/* extract cpu #   */
141
142	mov	w0, #-1
143
144	/* error checking */
145	cmp	w1, #NUMBER_OF_CLUSTERS
146	b.ge	1f
147	cmp	w2, #CORES_PER_CLUSTER
148	b.ge	1f
149
150	mov	w0, #CORES_PER_CLUSTER
151	mul	w1, w1, w0
152	add	w0, w1, w2
1531:
154	ret
155endfunc plat_core_pos
156
157#endif
158
/* This function disables the load-store prefetch of the calling core.
 * Note: this function is for A53 cores ONLY
 * in:  none
 * out: none
 * uses x0
 */
func _disable_ldstr_pfetch_A53
	mrs	x0, CORTEX_A53_CPUACTLR_EL1
	tst	x0, #CORTEX_A53_CPUACTLR_EL1_L1PCTL	/* any L1PCTL bits set? */
	b.ne	1f		/* prefetch control non-zero -> go clear it */
	b	2f		/* already zero -> nothing to do */

	/* .align 6 = 64-byte alignment; presumably so the dsb/msr
	 * sequence below sits within a single instruction cache line —
	 * confirm before changing.
	 */
.align 6
1:
	dsb	sy		/* drain outstanding memory accesses first */
	isb
	bic	x0, x0, #CORTEX_A53_CPUACTLR_EL1_L1PCTL	/* L1PCTL = 0 */
	msr	CORTEX_A53_CPUACTLR_EL1, x0
	isb			/* make the CPUACTLR change take effect */

2:
	ret
endfunc _disable_ldstr_pfetch_A53
182
183
/* This function disables the load-store prefetch of the calling core
 * (sets the CPUACTLR bit that disables the L1 D-cache HW prefetcher).
 * Note: this function is for A72 cores ONLY
 * in:  none
 * out: none
 * uses x0
 */
func _disable_ldstr_pfetch_A72

	mrs	x0, CORTEX_A72_CPUACTLR_EL1
	tst	x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
	b.eq	1f		/* disable bit clear -> go set it */
	b	2f		/* already disabled -> nothing to do */

	/* .align 6 = 64-byte alignment; presumably so the dsb/msr
	 * sequence below sits within a single instruction cache line —
	 * confirm before changing.
	 */
.align 6
1:
	dsb	sy		/* drain outstanding memory accesses first */
	isb
	orr	x0, x0, #CORTEX_A72_CPUACTLR_EL1_DISABLE_L1_DCACHE_HW_PFTCH
	msr	CORTEX_A72_CPUACTLR_EL1, x0
	isb			/* make the CPUACTLR change take effect */

2:
	ret
endfunc _disable_ldstr_pfetch_A72
208
209/*
210 * Function sets the SACR pagesize to 64k
211 */
212func _set_smmu_pagesz_64
213
214	ldr	x1, =NXP_SMMU_ADDR
215	ldr	w0, [x1, #0x10]
216	orr	w0, w0, #1 << 16	/* setting to 64K page */
217	str	w0, [x1, #0x10]
218
219	ret
220endfunc _set_smmu_pagesz_64
221