// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 *
 * This file is derived from arch/powerpc/cpu/mpc85xx/cpu.c and
 * arch/powerpc/cpu/mpc86xx/cpu.c. Basically this file contains
 * cpu specific common code for 85xx/86xx processors.
 */

#include <config.h>
#include <command.h>
#include <cpu_func.h>
#include <init.h>
#include <net.h>
#include <tsec.h>
#include <fm_eth.h>
#include <netdev.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <vsc9953.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
#if defined(CONFIG_MPC85xx)
	CPU_TYPE_ENTRY(8533, 8533, 1),
	CPU_TYPE_ENTRY(8535, 8535, 1),
	CPU_TYPE_ENTRY(8536, 8536, 1),
	CPU_TYPE_ENTRY(8540, 8540, 1),
	CPU_TYPE_ENTRY(8541, 8541, 1),
	CPU_TYPE_ENTRY(8543, 8543, 1),
	CPU_TYPE_ENTRY(8544, 8544, 1),
	CPU_TYPE_ENTRY(8545, 8545, 1),
	CPU_TYPE_ENTRY(8547, 8547, 1),
	CPU_TYPE_ENTRY(8548, 8548, 1),
	CPU_TYPE_ENTRY(8555, 8555, 1),
	CPU_TYPE_ENTRY(8560, 8560, 1),
	CPU_TYPE_ENTRY(8567, 8567, 1),
	CPU_TYPE_ENTRY(8568, 8568, 1),
	CPU_TYPE_ENTRY(8569, 8569, 1),
	CPU_TYPE_ENTRY(8572, 8572, 2),
	CPU_TYPE_ENTRY(P1010, P1010, 1),
	CPU_TYPE_ENTRY(P1011, P1011, 1),
	CPU_TYPE_ENTRY(P1012, P1012, 1),
	CPU_TYPE_ENTRY(P1013, P1013, 1),
	CPU_TYPE_ENTRY(P1014, P1014, 1),
	CPU_TYPE_ENTRY(P1017, P1017, 1),
	CPU_TYPE_ENTRY(P1020, P1020, 2),
	CPU_TYPE_ENTRY(P1021, P1021, 2),
	CPU_TYPE_ENTRY(P1022, P1022, 2),
	CPU_TYPE_ENTRY(P1023, P1023, 2),
	CPU_TYPE_ENTRY(P1024, P1024, 2),
	CPU_TYPE_ENTRY(P1025, P1025, 2),
	CPU_TYPE_ENTRY(P2010, P2010, 1),
	CPU_TYPE_ENTRY(P2020, P2020, 2),
	CPU_TYPE_ENTRY(P2040, P2040, 4),
	CPU_TYPE_ENTRY(P2041, P2041, 4),
	CPU_TYPE_ENTRY(P3041, P3041, 4),
	CPU_TYPE_ENTRY(P4040, P4040, 4),
	CPU_TYPE_ENTRY(P4080, P4080, 8),
	CPU_TYPE_ENTRY(P5010, P5010, 1),
	CPU_TYPE_ENTRY(P5020, P5020, 2),
	CPU_TYPE_ENTRY(P5021, P5021, 2),
	CPU_TYPE_ENTRY(P5040, P5040, 4),
	CPU_TYPE_ENTRY(T4240, T4240, 0),
	CPU_TYPE_ENTRY(T4120, T4120, 0),
	CPU_TYPE_ENTRY(T4160, T4160, 0),
	CPU_TYPE_ENTRY(T4080, T4080, 4),
	CPU_TYPE_ENTRY(B4860, B4860, 0),
	CPU_TYPE_ENTRY(G4860, G4860, 0),
	CPU_TYPE_ENTRY(B4440, B4440, 0),
	CPU_TYPE_ENTRY(B4460, B4460, 0),
	CPU_TYPE_ENTRY(G4440, G4440, 0),
	CPU_TYPE_ENTRY(B4420, B4420, 0),
	CPU_TYPE_ENTRY(B4220, B4220, 0),
	CPU_TYPE_ENTRY(T1040, T1040, 0),
	CPU_TYPE_ENTRY(T1041, T1041, 0),
	CPU_TYPE_ENTRY(T1042, T1042, 0),
	CPU_TYPE_ENTRY(T1020, T1020, 0),
	CPU_TYPE_ENTRY(T1021, T1021, 0),
	CPU_TYPE_ENTRY(T1022, T1022, 0),
	CPU_TYPE_ENTRY(T1024, T1024, 0),
	CPU_TYPE_ENTRY(T1023, T1023, 0),
	CPU_TYPE_ENTRY(T1014, T1014, 0),
	CPU_TYPE_ENTRY(T1013, T1013, 0),
	CPU_TYPE_ENTRY(T2080, T2080, 0),
	CPU_TYPE_ENTRY(T2081, T2081, 0),
	CPU_TYPE_ENTRY(BSC9130, 9130, 1),
	CPU_TYPE_ENTRY(BSC9131, 9131, 1),
	CPU_TYPE_ENTRY(BSC9132, 9132, 2),
	CPU_TYPE_ENTRY(BSC9232, 9232, 2),
	CPU_TYPE_ENTRY(C291, C291, 1),
	CPU_TYPE_ENTRY(C292, C292, 1),
	CPU_TYPE_ENTRY(C293, C293, 1),
#elif defined(CONFIG_MPC86xx)
	CPU_TYPE_ENTRY(8610, 8610, 1),
	CPU_TYPE_ENTRY(8641, 8641, 2),
	CPU_TYPE_ENTRY(8641D, 8641D, 2),
#endif
};

#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
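/*
 * Return the initiator type word for initiator slot init_id within the given
 * cluster, as read from the topology initiator type (TP_ITYP) registers.
 * Returns 0 if the entry is not marked available.
 */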
static inline u32 init_type(u32 cluster, int init_id)
{
	ccsr_gur_t *gur = (void __iomem *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_be32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

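/*
 * Walk the topology cluster registers and build a bitmask with one bit set
 * for each PPC core found; bit positions follow the order in which the
 * initiators are discovered.
 */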
u32 compute_ppc_cpumask(void)
{
	ccsr_gur_t *gur = (void __iomem *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = init_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_PPC)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
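/*
 * Walk the clusters starting at CONFIG_DSP_CLUSTER_START and build a bitmask
 * with one bit set for each SC/DSP core found.
 */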
u32 compute_dsp_cpumask(void)
{
	ccsr_gur_t *gur = (void __iomem *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	int i = CONFIG_DSP_CLUSTER_START, count = 0;
	u32 cluster, type, dsp_mask = 0;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = init_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_SC)
					dsp_mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return dsp_mask;
}

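/*
 * Map a DSP core number to the index of the cluster that contains it.
 * Returns -1 if the core cannot be found.
 */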
int fsl_qoriq_dsp_core_to_cluster(unsigned int core)
{
	ccsr_gur_t *gur = (void __iomem *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	int count = 0, i = CONFIG_DSP_CLUSTER_START;
	u32 cluster;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (init_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}
#endif

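/*
 * Map a PPC core number to the index of the cluster that contains it.
 * Returns -1 if the core cannot be found.
 */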
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	ccsr_gur_t *gur = (void __iomem *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;
		cluster = in_be32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (init_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

#else /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
/*
 * Before chassis generation 2, the cpumask has to be hard-coded.
 * If the CPU type is unknown or the cpumask is unset, use 1 as a failsafe.
 */
#define compute_ppc_cpumask()	1
#define fsl_qoriq_core_to_cluster(x) x
#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */

static struct cpu_type cpu_type_unknown = CPU_TYPE_ENTRY(Unknown, Unknown, 0);

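/*
 * Look up the SoC version (as extracted from the SVR) in cpu_type_list and
 * return the matching entry, or cpu_type_unknown if there is no match.
 * Typical use (sketch): gd->arch.cpu = identify_cpu(SVR_SOC_VER(get_svr()));
 */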
struct cpu_type *identify_cpu(u32 ver)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) {
		if (cpu_type_list[i].soc_ver == ver)
			return &cpu_type_list[i];
	}
	return &cpu_type_unknown;
}

#define MPC8xxx_PICFRR_NCPU_MASK  0x00001f00
#define MPC8xxx_PICFRR_NCPU_SHIFT 8

/*
 * Return a 32-bit mask indicating which cores are present on this SOC.
 */
__weak u32 cpu_mask(void)
{
	ccsr_pic_t __iomem *pic = (void *)CFG_SYS_MPC8xxx_PIC_ADDR;
	struct cpu_type *cpu = gd->arch.cpu;

	/* better to query feature reporting register than just assume 1 */
	if (cpu == &cpu_type_unknown)
		return ((in_be32(&pic->frr) & MPC8xxx_PICFRR_NCPU_MASK) >>
			MPC8xxx_PICFRR_NCPU_SHIFT) + 1;

	if (cpu->num_cores == 0)
		return compute_ppc_cpumask();

	return cpu->mask;
}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
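/*
 * Return a 32-bit mask indicating which SC/DSP cores are present on this SOC.
 */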
__weak u32 cpu_dsp_mask(void)
{
	ccsr_pic_t __iomem *pic = (void *)CFG_SYS_MPC8xxx_PIC_ADDR;
	struct cpu_type *cpu = gd->arch.cpu;

	/* better to query feature reporting register than just assume 1 */
	if (cpu == &cpu_type_unknown)
		return ((in_be32(&pic->frr) & MPC8xxx_PICFRR_NCPU_MASK) >>
			 MPC8xxx_PICFRR_NCPU_SHIFT) + 1;

	if (cpu->dsp_num_cores == 0)
		return compute_dsp_cpumask();

	return cpu->dsp_mask;
}

/*
 * Return the number of SC/DSP cores on this SOC.
 */
__weak int cpu_num_dspcores(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	/*
	 * Report # of cores in terms of the cpu_mask if we haven't
	 * figured out how many there are yet
	 */
	if (cpu->dsp_num_cores == 0)
		return hweight32(cpu_dsp_mask());

	return cpu->dsp_num_cores;
}
#endif

/*
 * Return the number of PPC cores on this SOC.
 */
__weak int cpu_numcores(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	/*
	 * Report # of cores in terms of the cpu_mask if we haven't
	 * figured out how many there are yet
	 */
	if (cpu->num_cores == 0)
		return hweight32(cpu_mask());

	return cpu->num_cores;
}

/*
 * Check if the given core ID is valid
 *
 * Returns zero if it isn't, 1 if it is.
 */
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}

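/*
 * Identify the running SoC from the version field of the SVR and cache the
 * matching cpu_type entry in global data.
 */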
int arch_cpu_init(void)
{
	uint svr;
	uint ver;

	svr = get_svr();
	ver = SVR_SOC_VER(svr);

	gd->arch.cpu = identify_cpu(ver);

	return 0;
}

/* Once in memory, compute mask & # cores once and save them off */
int fixup_cpu(void)
{
	struct cpu_type *cpu = gd->arch.cpu;

	if (cpu->num_cores == 0) {
		cpu->mask = cpu_mask();
		cpu->num_cores = cpu_numcores();
	}

#ifdef CONFIG_HETROGENOUS_CLUSTERS
	if (cpu->dsp_num_cores == 0) {
		cpu->dsp_mask = cpu_dsp_mask();
		cpu->dsp_num_cores = cpu_num_dspcores();
	}
#endif
	return 0;
}