/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>
/*
 * Fill in the buffer with character c (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - buf
 *	x1 - c
 *	x2 - n
 * Returns:
 *	x0 - buf
 */

dstin		.req	x0
val		.req	w1
count		.req	x2
tmp1		.req	x3
tmp1w		.req	w3
tmp2		.req	x4
tmp2w		.req	w4
zva_len_x	.req	x5
zva_len		.req	w5
zva_bits_x	.req	x6

A_l		.req	x7
A_lw		.req	w7
dst		.req	x8
tmp3w		.req	w9
tmp3		.req	x9

ENTRY(memset)
	mov	dst, dstin	/* Preserve return value.  */
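	/* Replicate the low byte of c across all eight bytes of A_l. */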
	and	A_lw, val, #255
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32

	cmp	count, #15
	b.hi	.Lover16_proc
	/* count is at most 15; all stores may be unaligned. */
	tbz	count, #3, 1f
	str	A_l, [dst], #8
1:
	tbz	count, #2, 2f
	str	A_lw, [dst], #4
2:
	tbz	count, #1, 3f
	strh	A_lw, [dst], #2
3:
	tbz	count, #0, 4f
	strb	A_lw, [dst]
4:
	ret

.Lover16_proc:
	/* Check whether the start address is 16-byte aligned. */
	neg	tmp2, dst
	ands	tmp2, tmp2, #15
	b.eq	.Laligned
/*
* count is at least 16, so we can use an stp to store the first 16 bytes,
* then advance dst to the next 16-byte boundary.
*/
	stp	A_l, A_l, [dst]	/* Unaligned store. */
	/* Make dst 16-byte aligned. */
	sub	count, count, tmp2
	add	dst, dst, tmp2

.Laligned:
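	/* A zero fill value lets us use the DC ZVA block-zeroing path. */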
	cbz	A_l, .Lzero_mem

.Ltail_maybe_long:
	cmp	count, #64
	b.ge	.Lnot_short
.Ltail63:
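	/*
	* Store 48, 32 or 16 bytes according to bits 5:4 of count, then
	* handle the remaining 0-15 bytes at 3f below.
	*/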
	ands	tmp1, count, #0x30
	b.eq	3f
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	stp	A_l, A_l, [dst], #16
1:
	stp	A_l, A_l, [dst], #16
2:
	stp	A_l, A_l, [dst], #16
/*
* The remaining length is less than 16; use an stp to write the last 16 bytes.
* Some bytes may be written twice and the access may be unaligned.
*/
3:
	ands	count, count, #15
	cbz	count, 4f
	add	dst, dst, count
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
4:
	ret

	/*
	* Critical loop. Start at a new cache line boundary. Assuming
	* 64 bytes per line, this ensures the entire loop is in one line.
	*/
	.p2align	L1_CACHE_SHIFT
.Lnot_short:
	sub	dst, dst, #16	/* Pre-bias.  */
	sub	count, count, #64
1:
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
	b.ge	1b
	tst	count, #0x3f
	add	dst, dst, #16
	b.ne	.Ltail63
.Lexitfunc:
	ret

	/*
	* For zeroing memory, check to see if we can use the ZVA feature to
	* zero entire 'cache' lines.
	*/
.Lzero_mem:
	cmp	count, #63
	b.le	.Ltail63
	/*
	* For zeroing small amounts of memory, it's not worth setting up
	* the line-clear code.
	*/
	cmp	count, #128
	b.lt	.Lnot_short	/* count is at least 128 bytes. */

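	/*
	* DCZID_EL0: bit 4 (DZP) set means DC ZVA is prohibited; bits [3:0]
	* hold log2 of the block size in words, so the block size in bytes
	* is 4 << DCZID_EL0[3:0].
	*/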
	mrs	tmp1, dczid_el0
	tbnz	tmp1, #4, .Lnot_short
	mov	tmp3w, #4
	and	zva_len, tmp1w, #15	/* Safety: other bits reserved.  */
	lsl	zva_len, tmp3w, zva_len

	ands	tmp3w, zva_len, #63
	/*
	* Ensure zva_len is at least 64: using DC ZVA is not worthwhile when
	* the block size is smaller than 64 bytes.
	*/
	b.ne	.Lnot_short
.Lzero_by_line:
	/*
	* Compute how far we need to go to become suitably aligned. We're
	* already at quad-word alignment.
	*/
	cmp	count, zva_len_x
	b.lt	.Lnot_short		/* Not enough to reach alignment.  */
	sub	zva_bits_x, zva_len_x, #1
	neg	tmp2, dst
	ands	tmp2, tmp2, zva_bits_x
	b.eq	2f			/* Already aligned.  */
	/* Not aligned; check that there's enough to zero after alignment. */
	sub	tmp1, count, tmp2
	/*
	* Guarantee that the remaining length to be zeroed with DC ZVA is at
	* least 64 bytes and at least one block, so the code at 2f does not
	* run past the end of the buffer.
	*/
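	/*
	* ccmp: if tmp1 >= 64, compare tmp1 against zva_len_x; otherwise set
	* NZCV to 0b1000 (N set) so that the b.lt is taken.  The net effect
	* is to branch to .Lnot_short when tmp1 < 64 or tmp1 < zva_len_x.
	*/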
	cmp	tmp1, #64
	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
	b.lt	.Lnot_short
	/*
	* We know that there's at least 64 bytes to zero and that it's safe
	* to overrun by 64 bytes.
	*/
	mov	count, tmp1
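	/*
	* Zero 64 bytes at a time until dst reaches the next ZVA block
	* boundary; tmp2 holds the distance to that boundary.
	*/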
1:
	stp	A_l, A_l, [dst]
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	subs	tmp2, tmp2, #64
	stp	A_l, A_l, [dst, #48]
	add	dst, dst, #64
	b.ge	1b
	/* We've overrun a bit, so adjust dst downwards. */
	add	dst, dst, tmp2
2:
	sub	count, count, zva_len_x
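	/*
	* Zero one ZVA block per iteration; count was pre-decremented by one
	* block above, so b.ge loops while a full block remains.
	*/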
3:
	dc	zva, dst
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
	b.ge	3b
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long
	ret
ENDPROC(memset)