1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Copyright (C) 2013 Imagination Technologies
4 * Author: Paul Burton <paul.burton@mips.com>
5 */
6
7#include <asm/addrspace.h>
8#include <asm/asm.h>
9#include <asm/asm-offsets.h>
10#include <asm/asmmacro.h>
11#include <asm/cacheops.h>
12#include <asm/eva.h>
13#include <asm/mipsregs.h>
14#include <asm/mipsmtregs.h>
15#include <asm/pm.h>
16#include <asm/smp-cps.h>
17
18#define GCR_CPC_BASE_OFS	0x0088
19#define GCR_CL_COHERENCE_OFS	0x2008
20#define GCR_CL_ID_OFS		0x2028
21
22#define CPC_CL_VC_STOP_OFS	0x2020
23#define CPC_CL_VC_RUN_OFS	0x2028
24
25.extern mips_cm_base
26
27.set noreorder
28
29#ifdef CONFIG_64BIT
30# define STATUS_BITDEPS		ST0_KX
31#else
32# define STATUS_BITDEPS		0
33#endif
34
35#ifdef CONFIG_MIPS_CPS_NS16550
36
37#define DUMP_EXCEP(name)		\
38	PTR_LA	a0, 8f;			\
39	jal	mips_cps_bev_dump;	\
40	 nop;				\
41	TEXT(name)
42
43#else /* !CONFIG_MIPS_CPS_NS16550 */
44
45#define DUMP_EXCEP(name)
46
47#endif /* !CONFIG_MIPS_CPS_NS16550 */
48
49	/*
50	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
51	 * MT is not supported then branch to nomt.
	 *
	 * Walks the Config register chain: each bgez branches out when bit 31
	 * (the Config 'M' continuation bit) is clear, i.e. the next Config
	 * register does not exist; the next mfc0 sits in the branch delay
	 * slot (note the leading space) so it executes either way.
52	 */
53	.macro	has_mt	dest, nomt
54	mfc0	\dest, CP0_CONFIG, 1
55	bgez	\dest, \nomt
56	 mfc0	\dest, CP0_CONFIG, 2
57	bgez	\dest, \nomt
58	 mfc0	\dest, CP0_CONFIG, 3
	/* Isolate the MT-ASE-present bit of Config3 */
59	andi	\dest, \dest, MIPS_CONF3_MT
60	beqz	\dest, \nomt
61	 nop
62	.endm
63
64	/*
65	 * Set dest to non-zero if the core supports MIPSr6 multithreading
66	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
67	 * branch to nomt.
	 *
	 * Same pattern as has_mt: walk Config1..Config5 via the 'M'
	 * continuation bit (bit 31; bgez exits when it is clear), with each
	 * following mfc0 in the branch delay slot, then test Config5.VP.
68	 */
69	.macro	has_vp	dest, nomt
70	mfc0	\dest, CP0_CONFIG, 1
71	bgez	\dest, \nomt
72	 mfc0	\dest, CP0_CONFIG, 2
73	bgez	\dest, \nomt
74	 mfc0	\dest, CP0_CONFIG, 3
75	bgez	\dest, \nomt
76	 mfc0	\dest, CP0_CONFIG, 4
77	bgez	\dest, \nomt
78	 mfc0	\dest, CP0_CONFIG, 5
	/* Isolate the Virtual Processor support bit of Config5 */
79	andi	\dest, \dest, MIPS_CONF5_VP
80	beqz	\dest, \nomt
81	 nop
82	.endm
83
84
85.balign 0x1000
86
87LEAF(mips_cps_core_entry)
	/*
	 * Entry point for a core/VPE brought up by the CPS SMP code, running
	 * with the boot exception vectors (Status.BEV) in effect, so the
	 * .org exception stubs following this function are live.
	 *
	 * Register contract (established below / by patching):
	 *   s0 = Kseg0 CCA to use       (patched in by cps_smp_setup)
	 *   s1 = GCR base address       (patched in by cps_smp_setup)
	 *   s7 = prior coherence state  (read from the GCR below)
	 *
	 * Convention: instructions indented by one extra space occupy
	 * branch delay slots.
	 */
88	/*
89	 * These first several instructions will be patched by cps_smp_setup to load the
90	 * CCA to use into register s0 and GCR base address to register s1.
91	 */
92	.rept   CPS_ENTRY_PATCH_INSNS
93	nop
94	.endr
95
96	.global mips_cps_core_entry_patch_end
97mips_cps_core_entry_patch_end:
98
99	/* Check whether we're here due to an NMI */
100	mfc0	k0, CP0_STATUS
101	and	k0, k0, ST0_NMI
102	beqz	k0, not_nmi
103	 nop
104
105	/* This is an NMI */
106	PTR_LA	k0, nmi_handler
107	jr	k0
108	 nop
109
110not_nmi:
111	/* Setup Cause */
	/* Use the special interrupt vector (Cause.IV) */
112	li	t0, CAUSEF_IV
113	mtc0	t0, CP0_CAUSE
114
115	/* Setup Status */
	/* Enable CP0/FPU access, keep BEV set; STATUS_BITDEPS adds ST0_KX
	 * on 64-bit kernels (see #ifdef CONFIG_64BIT above) */
116	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
117	mtc0	t0, CP0_STATUS
118
119	/* Skip cache & coherence setup if we're already coherent */
	/* s7 records the prior coherence state; it is tested again after
	 * mips_cps_get_bootcfg to also skip core-level init */
120	lw	s7, GCR_CL_COHERENCE_OFS(s1)
121	bnez	s7, 1f
122	 nop
123
124	/* Initialize the L1 caches */
125	jal	mips_cps_cache_init
126	 nop
127
128	/* Enter the coherent domain */
	/* 0xff enables all coherence domain bits for this core */
129	li	t0, 0xff
130	sw	t0, GCR_CL_COHERENCE_OFS(s1)
131	ehb
132
133	/* Set Kseg0 CCA to that in s0 */
	/* Clear the low 3 bits (K0 field) of Config via ori/xori, then OR
	 * in the CCA that was patched into s0 */
1341:	mfc0	t0, CP0_CONFIG
135	ori	t0, 0x7
136	xori	t0, 0x7
137	or	t0, t0, s0
138	mtc0	t0, CP0_CONFIG
139	ehb
140
141	/* Jump to kseg0 */
142	PTR_LA	t0, 1f
143	jr	t0
144	 nop
145
146	/*
147	 * We're up, cached & coherent. Perform any EVA initialization necessary
148	 * before we access memory.
149	 */
1501:	eva_init
151
152	/* Retrieve boot configuration pointers */
	/* Returns v0 = core_boot_config, v1 = vpe_boot_config, t9 = VPE ID */
153	jal	mips_cps_get_bootcfg
154	 nop
155
156	/* Skip core-level init if we started up coherent */
157	bnez	s7, 1f
158	 nop
159
160	/* Perform any further required core-level initialisation */
161	jal	mips_cps_core_init
162	 nop
163
164	/*
165	 * Boot any other VPEs within this core that should be online, and
166	 * deactivate this VPE if it should be offline.
167	 */
	/* a0 = core_boot_config (v0), a1 = this VPE's ID (t9) */
168	move	a1, t9
169	jal	mips_cps_boot_vpes
170	 move	a0, v0
171
172	/* Off we go! */
	/* Load PC/GP/SP from this VPE's vpe_boot_config and jump to it */
1731:	PTR_L	t1, VPEBOOTCFG_PC(v1)
174	PTR_L	gp, VPEBOOTCFG_GP(v1)
175	PTR_L	sp, VPEBOOTCFG_SP(v1)
176	jr	t1
177	 nop
178	END(mips_cps_core_entry)
179
180.org 0x200
181LEAF(excep_tlbfill)
	/* TLB refill while on the boot vectors: optionally dump state over
	 * the NS16550 (DUMP_EXCEP is empty otherwise), then spin forever. */
182	DUMP_EXCEP("TLB Fill")
183	b	.
184	 nop
185	END(excep_tlbfill)
186
187.org 0x280
188LEAF(excep_xtlbfill)
	/* 64-bit (XTLB) refill while on the boot vectors: optionally dump
	 * state, then spin forever. */
189	DUMP_EXCEP("XTLB Fill")
190	b	.
191	 nop
192	END(excep_xtlbfill)
193
194.org 0x300
195LEAF(excep_cache)
	/* Cache error while on the boot vectors: optionally dump state,
	 * then spin forever. */
196	DUMP_EXCEP("Cache")
197	b	.
198	 nop
199	END(excep_cache)
200
201.org 0x380
202LEAF(excep_genex)
	/* General exception while on the boot vectors: optionally dump
	 * state, then spin forever. */
203	DUMP_EXCEP("General")
204	b	.
205	 nop
206	END(excep_genex)
207
208.org 0x400
209LEAF(excep_intex)
	/* Interrupt while on the boot vectors (Cause.IV vector): optionally
	 * dump state, then spin forever. */
210	DUMP_EXCEP("Interrupt")
211	b	.
212	 nop
213	END(excep_intex)
214
215.org 0x480
216LEAF(excep_ejtag)
	/* EJTAG debug exception: hand off to the kernel's debug handler
	 * rather than spinning. k0 is a kernel-reserved scratch register. */
217	PTR_LA	k0, ejtag_debug_handler
218	jr	k0
219	 nop
220	END(excep_ejtag)
221
222LEAF(mips_cps_core_init)
	/*
	 * Core-level initialisation. With CONFIG_MIPS_MT_SMP this binds one
	 * TC to each VPE (1:1), leaving every VPE other than VPE0 halted,
	 * non-active and non-allocatable; without MT it is a no-op.
	 * Clobbers (MT path): t0, t1, ta1, ta3.
	 */
223#ifdef CONFIG_MIPS_MT_SMP
224	/* Check that the core implements the MT ASE */
225	has_mt	t0, 3f
226
227	.set	push
228	.set	MIPS_ISA_LEVEL_RAW
229	.set	mt
230
231	/* Only allow 1 TC per VPE to execute... */
232	dmt
233
234	/* ...and for the moment only 1 VPE */
235	dvpe
	/* jr.hb: jump with hazard barrier so dmt/dvpe take effect at 1f */
236	PTR_LA	t1, 1f
237	jr.hb	t1
238	 nop
239
240	/* Enter VPE configuration state */
2411:	mfc0	t0, CP0_MVPCONTROL
242	ori	t0, t0, MVPCONTROL_VPC
243	mtc0	t0, CP0_MVPCONTROL
244
245	/* Retrieve the number of VPEs within the core */
246	mfc0	t0, CP0_MVPCONF0
247	srl	t0, t0, MVPCONF0_PVPE_SHIFT
248	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	/* ta3 = VPE count (PVPE field + 1) — loop bound below */
249	addiu	ta3, t0, 1
250
251	/* If there's only 1, we're done */
252	beqz	t0, 2f
253	 nop
254
255	/* Loop through each VPE within this core */
	/* ta1 = current VPE index; start at 1 since VPE0 is the one running */
256	li	ta1, 1
257
2581:	/* Operate on the appropriate TC */
	/* Select target TC via VPEControl.TargTC; mttc0 below then targets it */
259	mtc0	ta1, CP0_VPECONTROL
260	ehb
261
262	/* Bind TC to VPE (1:1 TC:VPE mapping) */
263	mttc0	ta1, CP0_TCBIND
264
265	/* Set exclusive TC, non-active, master */
266	li	t0, VPECONF0_MVP
267	sll	t1, ta1, VPECONF0_XTC_SHIFT
268	or	t0, t0, t1
269	mttc0	t0, CP0_VPECONF0
270
271	/* Set TC non-active, non-allocatable */
272	mttc0	zero, CP0_TCSTATUS
273
274	/* Set TC halted */
275	li	t0, TCHALT_H
276	mttc0	t0, CP0_TCHALT
277
278	/* Next VPE */
279	addiu	ta1, ta1, 1
280	slt	t0, ta1, ta3
281	bnez	t0, 1b
282	 nop
283
284	/* Leave VPE configuration state */
2852:	mfc0	t0, CP0_MVPCONTROL
286	xori	t0, t0, MVPCONTROL_VPC
287	mtc0	t0, CP0_MVPCONTROL
288
2893:	.set	pop
290#endif
291	jr	ra
292	 nop
293	END(mips_cps_core_init)
294
295/**
296 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
297 *
298 * Returns: pointer to struct core_boot_config in v0, pointer to
299 *          struct vpe_boot_config in v1, VPE ID in t9
 *
 * Expects s1 = GCR base (established by mips_cps_core_entry).
 * Clobbers: t0, t1, t2, ta2, ta3.
300 */
301LEAF(mips_cps_get_bootcfg)
302	/* Calculate a pointer to this cores struct core_boot_config */
	/* v0 = mips_cps_core_bootcfg + (core ID from GCR) * COREBOOTCFG_SIZE */
303	lw	t0, GCR_CL_ID_OFS(s1)
304	li	t1, COREBOOTCFG_SIZE
305	mul	t0, t0, t1
306	PTR_LA	t1, mips_cps_core_bootcfg
307	PTR_L	t1, 0(t1)
308	PTR_ADDU v0, t0, t1
309
310	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
311	li	t9, 0
312#if defined(CONFIG_CPU_MIPSR6)
313	has_vp	ta2, 1f
314
315	/*
316	 * Assume non-contiguous numbering. Perhaps some day we'll need
317	 * to handle contiguous VP numbering, but no such systems yet
318	 * exist.
319	 */
320	mfc0	t9, CP0_GLOBALNUMBER
321	andi	t9, t9, MIPS_GLOBALNUMBER_VP
322#elif defined(CONFIG_MIPS_MT_SMP)
323	has_mt	ta2, 1f
324
325	/* Find the number of VPEs present in the core */
326	mfc0	t1, CP0_MVPCONF0
327	srl	t1, t1, MVPCONF0_PVPE_SHIFT
328	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
329	addiu	t1, t1, 1
330
331	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	/* mask = (1 << (31 - clz(nvpes))) - 1, i.e. a mask of
	 * floor(log2(nvpes)) low bits */
332	clz	t1, t1
333	li	t2, 31
334	subu	t1, t2, t1
335	li	t2, 1
336	sll	t1, t2, t1
337	addiu	t1, t1, -1
338
339	/* Retrieve the VPE ID from EBase.CPUNum */
	/* CP0 register 15 select 1 is EBase */
340	mfc0	t9, $15, 1
341	and	t9, t9, t1
342#endif
343
3441:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	/* v1 = this core's vpe_config array + t9 * VPEBOOTCFG_SIZE */
345	li	t1, VPEBOOTCFG_SIZE
346	mul	v1, t9, t1
347	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
348	PTR_ADDU v1, v1, ta3
349
350	jr	ra
351	 nop
352	END(mips_cps_get_bootcfg)
353
354LEAF(mips_cps_boot_vpes)
	/*
	 * Start any other VPEs/VPs within this core that should be online,
	 * and stop this one if it should be offline.
	 *
	 * In: a0 = pointer to this core's struct core_boot_config
	 *     a1 = this VPE's ID (used to test its bit in the VPE mask)
	 *
	 * ta2 = VPE online mask, ta3 = vpe_config array pointer.
	 */
355	lw	ta2, COREBOOTCFG_VPEMASK(a0)
356	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
357
358#if defined(CONFIG_CPU_MIPSR6)
359
360	has_vp	t0, 5f
361
362	/* Find base address of CPC */
	/* Read the CPC base from the GCR, mask off the low 15 bits, and
	 * form an uncached (UNCAC_BASE) address to access it through */
363	PTR_LA	t1, mips_gcr_base
364	PTR_L	t1, 0(t1)
365	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
366	PTR_LI	t2, ~0x7fff
367	and	t1, t1, t2
368	PTR_LI	t2, UNCAC_BASE
369	PTR_ADD	t1, t1, t2
370
371	/* Start any other VPs that ought to be running */
372	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
373
374	/* Ensure this VP stops running if it shouldn't be */
	/* Write the inverted mask to VC_STOP: set bits stop those VPs */
375	not	ta2
376	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
377	ehb
378
379#elif defined(CONFIG_MIPS_MT)
380
381	/* If the core doesn't support MT then return */
382	has_mt	t0, 5f
383
384	/* Enter VPE configuration state */
385	.set	push
386	.set	MIPS_ISA_LEVEL_RAW
387	.set	mt
388	dvpe
389	.set	pop
390
	/* jr.hb: hazard barrier so dvpe takes effect before 1f */
391	PTR_LA	t1, 1f
392	jr.hb	t1
393	 nop
3941:	mfc0	t1, CP0_MVPCONTROL
395	ori	t1, t1, MVPCONTROL_VPC
396	mtc0	t1, CP0_MVPCONTROL
397	ehb
398
399	/* Loop through each VPE */
	/* t8 preserves the full mask for the later self-test; ta2 is
	 * shifted right each iteration, ta1 counts the current VPE ID */
400	move	t8, ta2
401	li	ta1, 0
402
403	/* Check whether the VPE should be running. If not, skip it */
4041:	andi	t0, ta2, 1
405	beqz	t0, 2f
406	 nop
407
408	/* Operate on the appropriate TC */
	/* Clear VPEControl.TargTC via ori/xori, then select TC == ta1 */
409	mfc0	t0, CP0_VPECONTROL
410	ori	t0, t0, VPECONTROL_TARGTC
411	xori	t0, t0, VPECONTROL_TARGTC
412	or	t0, t0, ta1
413	mtc0	t0, CP0_VPECONTROL
414	ehb
415
416	.set	push
417	.set	MIPS_ISA_LEVEL_RAW
418	.set	mt
419
420	/* Skip the VPE if its TC is not halted */
421	mftc0	t0, CP0_TCHALT
422	beqz	t0, 2f
423	 nop
424
425	/* Calculate a pointer to the VPEs struct vpe_boot_config */
426	li	t0, VPEBOOTCFG_SIZE
427	mul	t0, t0, ta1
428	addu	t0, t0, ta3
429
430	/* Set the TC restart PC */
431	lw	t1, VPEBOOTCFG_PC(t0)
432	mttc0	t1, CP0_TCRESTART
433
434	/* Set the TC stack pointer */
435	lw	t1, VPEBOOTCFG_SP(t0)
436	mttgpr	t1, sp
437
438	/* Set the TC global pointer */
439	lw	t1, VPEBOOTCFG_GP(t0)
440	mttgpr	t1, gp
441
442	/* Copy config from this VPE */
443	mfc0	t0, CP0_CONFIG
444	mttc0	t0, CP0_CONFIG
445
446	/*
447	 * Copy the EVA config from this VPE if the CPU supports it.
448	 * CONFIG3 must exist to be running MT startup - just read it.
449	 */
450	mfc0	t0, CP0_CONFIG, 3
451	and	t0, t0, MIPS_CONF3_SC
452	beqz	t0, 3f
453	 nop
454	mfc0    t0, CP0_SEGCTL0
455	mttc0	t0, CP0_SEGCTL0
456	mfc0    t0, CP0_SEGCTL1
457	mttc0	t0, CP0_SEGCTL1
458	mfc0    t0, CP0_SEGCTL2
459	mttc0	t0, CP0_SEGCTL2
4603:
461	/* Ensure no software interrupts are pending */
462	mttc0	zero, CP0_CAUSE
463	mttc0	zero, CP0_STATUS
464
465	/* Set TC active, not interrupt exempt */
466	mftc0	t0, CP0_TCSTATUS
467	li	t1, ~TCSTATUS_IXMT
468	and	t0, t0, t1
469	ori	t0, t0, TCSTATUS_A
470	mttc0	t0, CP0_TCSTATUS
471
472	/* Clear the TC halt bit */
473	mttc0	zero, CP0_TCHALT
474
475	/* Set VPE active */
476	mftc0	t0, CP0_VPECONF0
477	ori	t0, t0, VPECONF0_VPA
478	mttc0	t0, CP0_VPECONF0
479
480	/* Next VPE */
4812:	srl	ta2, ta2, 1
482	addiu	ta1, ta1, 1
483	bnez	ta2, 1b
484	 nop
485
486	/* Leave VPE configuration state */
487	mfc0	t1, CP0_MVPCONTROL
488	xori	t1, t1, MVPCONTROL_VPC
489	mtc0	t1, CP0_MVPCONTROL
490	ehb
491	evpe
492
493	.set	pop
494
495	/* Check whether this VPE is meant to be running */
	/* Test bit a1 (this VPE's ID) of the saved mask in t8 */
496	li	t0, 1
497	sll	t0, t0, a1
498	and	t0, t0, t8
499	bnez	t0, 2f
500	 nop
501
502	/* This VPE should be offline, halt the TC */
	/* Execution stops at the mtc0; the jr.hb is a hazard-barrier
	 * self-jump in case the halt has not yet taken effect */
503	li	t0, TCHALT_H
504	mtc0	t0, CP0_TCHALT
505	PTR_LA	t0, 1f
5061:	jr.hb	t0
507	 nop
508
5092:
510
511#endif /* CONFIG_MIPS_MT */
512
513	/* Return */
5145:	jr	ra
515	 nop
516	END(mips_cps_boot_vpes)
517
518LEAF(mips_cps_cache_init)
	/*
	 * Initialise the L1 I & D caches by storing zeroed tags to every
	 * index; geometry is decoded from Config1.
	 * Clobbers: v0, t0-t3, a0, a1.
	 */
519	/*
520	 * Clear the bits used to index the caches. Note that the architecture
521	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
522	 * be valid for all MIPS32 CPUs, even those for which said writes are
523	 * unnecessary.
524	 */
525	mtc0	zero, CP0_TAGLO, 0
526	mtc0	zero, CP0_TAGHI, 0
527	mtc0	zero, CP0_TAGLO, 2
528	mtc0	zero, CP0_TAGHI, 2
529	ehb
530
531	/* Primary cache configuration is indicated by Config1 */
532	mfc0	v0, CP0_CONFIG, 1
533
534	/* Detect I-cache line size */
	/* t0 = 2 << IL bytes; IL == 0 means no I-cache, so skip it */
535	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
536	beqz	t0, icache_done
537	 li	t1, 2
538	sllv	t0, t1, t0
539
540	/* Detect I-cache size */
	/* Sets per way: 32 << (IS + 1) for IS != 7. NOTE(review): when
	 * IS == 7 the branch is taken with t1 still == 7 (t3 = 32 loaded
	 * in the delay slot is never used) — confirm the IS == 7
	 * (32 sets/way) encoding is handled as intended. */
541	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
542	xori	t2, t1, 0x7
543	beqz	t2, 1f
544	 li	t3, 32
545	addiu	t1, t1, 1
546	sllv	t1, t3, t1
5471:	/* At this point t1 == I-cache sets per way */
	/* Total bytes = sets/way * line size * associativity (IA + 1) */
548	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
549	addiu	t2, t2, 1
550	mul	t1, t1, t0
551	mul	t1, t1, t2
552
	/* Index-store zeroed tags over [CKSEG0, CKSEG0 + size) in
	 * line-size steps */
553	li	a0, CKSEG0
554	PTR_ADD	a1, a0, t1
5551:	cache	Index_Store_Tag_I, 0(a0)
556	PTR_ADD	a0, a0, t0
557	bne	a0, a1, 1b
558	 nop
559icache_done:
560
561	/* Detect D-cache line size */
	/* t0 = 2 << DL bytes; DL == 0 means no D-cache, so skip it */
562	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
563	beqz	t0, dcache_done
564	 li	t1, 2
565	sllv	t0, t1, t0
566
567	/* Detect D-cache size */
	/* Same decoding as the I-cache above; the same NOTE(review) about
	 * the DS == 7 encoding applies here too */
568	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
569	xori	t2, t1, 0x7
570	beqz	t2, 1f
571	 li	t3, 32
572	addiu	t1, t1, 1
573	sllv	t1, t3, t1
5741:	/* At this point t1 == D-cache sets per way */
575	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
576	addiu	t2, t2, 1
577	mul	t1, t1, t0
578	mul	t1, t1, t2
579
	/* Same sweep as the I-cache, but a1 is the address of the LAST
	 * line and the increment sits in the branch delay slot */
580	li	a0, CKSEG0
581	PTR_ADDU a1, a0, t1
582	PTR_SUBU a1, a1, t0
5831:	cache	Index_Store_Tag_D, 0(a0)
584	bne	a0, a1, 1b
585	 PTR_ADD a0, a0, t0
586dcache_done:
587
588	jr	ra
589	 nop
590	END(mips_cps_cache_init)
591
592#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
593
594	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	/*
	 * \dest = &cps_cpu_state + __per_cpu_offset[cpu], where cpu is read
	 * from TI_CPU(gp) — assumes gp points at thread_info here (MIPS
	 * kernel convention; TODO confirm for this config). Uses $1 (at) as
	 * scratch, hence .set noat.
	 */
595	.macro	psstate	dest
596	.set	push
597	.set	noat
598	lw	$1, TI_CPU(gp)
	/* Scale the CPU number to a long-sized array index */
599	sll	$1, $1, LONGLOG
600	PTR_LA	\dest, __per_cpu_offset
601	addu	$1, $1, \dest
	/* $1 = __per_cpu_offset[cpu] */
602	lw	$1, 0($1)
603	PTR_LA	\dest, cps_cpu_state
604	addu	\dest, \dest, $1
605	.set	pop
606	.endm
607
608LEAF(mips_cps_pm_save)
	/*
	 * Save CPU register state into this CPU's mips_static_suspend_state
	 * ahead of entering a low power state. Note the return is `jr v0`,
	 * not `jr ra` — presumably SUSPEND_SAVE_REGS (asm/pm.h) leaves a
	 * continuation address in v0; TODO confirm against that header.
	 */
609	/* Save CPU state */
610	SUSPEND_SAVE_REGS
611	psstate	t1
612	SUSPEND_SAVE_STATIC
613	jr	v0
614	 nop
615	END(mips_cps_pm_save)
616
617LEAF(mips_cps_pm_restore)
	/*
	 * Restore the CPU state saved by mips_cps_pm_save from this CPU's
	 * mips_static_suspend_state (located via psstate into t1) and
	 * return via RESUME_RESTORE_REGS_RETURN.
	 */
618	/* Restore CPU state */
619	psstate	t1
620	RESUME_RESTORE_STATIC
621	RESUME_RESTORE_REGS_RETURN
622	END(mips_cps_pm_restore)
623
624#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
625