/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "assembler.h"

/*
 * Compare two memory areas.  When the two pointers share the same
 * low-order alignment bits, whole aligned dwords are compared; when the
 * offsets differ, the misaligned stream is handled with (hardware
 * supported) unaligned/overlapping loads.
 *
 * Parameters:
 *	x0 - const memory area 1 pointer
 *	x1 - const memory area 2 pointer
 *	x2 - the maximal compare byte length
 * Returns:
 *	x0 - a compare result, maybe less than, equal to, or greater than ZERO
 */

/* Parameters and result. */
src1		.req	x0	/* current read pointer into area 1 */
src2		.req	x1	/* current read pointer into area 2 */
limit		.req	x2	/* bytes remaining to compare */
result		.req	x0	/* aliases src1: written only on exit */

/* Internal variables. */
data1		.req	x3	/* latest dword loaded from src1 */
data1w		.req	w3
data2		.req	x4	/* latest dword loaded from src2 */
data2w		.req	w4
has_nul		.req	x5	/* unused here; kept from the strings template */
diff		.req	x6	/* data1 ^ data2: non-zero iff bytes differ */
endloop		.req	x7	/* loop-exit flag: diff found or limit reached */
tmp1		.req	x8
tmp2		.req	x9
tmp3		.req	x10
pos		.req	x11	/* byte count / bit position scratch */
limit_wd	.req	x12	/* limit converted to whole dwords */
mask		.req	x13	/* mask of bytes beyond the compare range */

ENTRY(memcmp)
	cbz	limit, .Lret0		/* zero length compares equal */
	eor	tmp1, src1, src2
	tst	tmp1, #7
	b.ne	.Lmisaligned8		/* pointers differ in low 3 bits */
	ands	tmp1, src1, #7
	b.ne	.Lmutual_align		/* same offset, but not dword aligned */
	sub	limit_wd, limit, #1	/* limit != 0, so no underflow.  */
	lsr	limit_wd, limit_wd, #3	/* Convert to Dwords.  */
	/*
	 * The input source addresses are at an alignment boundary.
	 * Directly compare eight bytes each time.
	 */
.Lloop_aligned:
	ldr	data1, [src1], #8
	ldr	data2, [src2], #8
.Lstart_realigned:
	subs	limit_wd, limit_wd, #1
	eor	diff, data1, data2	/* Non-zero if differences found.  */
	csinv	endloop, diff, xzr, cs	/* Last Dword or differences: all-ones
					 * when limit_wd underflowed (carry
					 * clear), else diff.  */
	cbz	endloop, .Lloop_aligned

	/* Not reached the limit, must have found a diff.  */
	tbz	limit_wd, #63, .Lnot_limit

	/* Limit % 8 == 0 => the diff (if any) is in the last 8 bytes. */
	ands	limit, limit, #7
	b.eq	.Lnot_limit
	/*
	 * Fewer than 8 valid bytes remain; mask off the bytes of the final
	 * dword load that lie beyond the intended memory range.
	 */
	lsl	limit, limit, #3	/* bytes -> bits.  */
	mov	mask, #~0
CPU_BE( lsr	mask, mask, limit )	/* big-endian: excess bytes at LSB */
CPU_LE( lsl	mask, mask, limit )	/* little-endian: excess bytes at MSB */
	bic	data1, data1, mask
	bic	data2, data2, mask

	orr	diff, diff, mask
	b	.Lnot_limit

.Lmutual_align:
	/*
	 * Sources are mutually aligned, but are not currently at an
	 * alignment boundary.  Round down the addresses and then mask off
	 * the bytes that precede the start point.
	 */
	bic	src1, src1, #7
	bic	src2, src2, #7
	ldr	data1, [src1], #8
	ldr	data2, [src2], #8
	/*
	 * We cannot simply add the alignment offset (tmp1) to limit here,
	 * since the addition could overflow limit.  Instead fold the offset
	 * into the dword count computation below.
	 */
	sub	limit_wd, limit, #1	/* limit != 0, so no underflow.  */
	and	tmp3, limit_wd, #7
	lsr	limit_wd, limit_wd, #3
	add	tmp3, tmp3, tmp1
	add	limit_wd, limit_wd, tmp3, lsr #3
	add	limit, limit, tmp1	/* Adjust the limit for the extra.  */

	lsl	tmp1, tmp1, #3		/* Bytes beyond alignment -> bits.  */
	neg	tmp1, tmp1		/* Bits to alignment - 64.  */
	mov	tmp2, #~0
	/* Mask off the unwanted bytes that precede the start address. */
CPU_BE( lsl	tmp2, tmp2, tmp1 )	/* Big-endian.  Early bytes are at MSB. */
CPU_LE( lsr	tmp2, tmp2, tmp1 )	/* Little-endian.  Early bytes are at LSB. */

	/* Force the pre-start bytes equal in both dwords. */
	orr	data1, data1, tmp2
	orr	data2, data2, tmp2
	b	.Lstart_realigned

	/* src1 and src2 have different alignment offsets. */
.Lmisaligned8:
	cmp	limit, #8
	b.lo	.Ltiny8proc		/* limit < 8: compare byte by byte */

	and	tmp1, src1, #7
	neg	tmp1, tmp1
	add	tmp1, tmp1, #8		/* valid length in the first 8 bytes of src1 */
	and	tmp2, src2, #7
	neg	tmp2, tmp2
	add	tmp2, tmp2, #8		/* valid length in the first 8 bytes of src2 */
	subs	tmp3, tmp1, tmp2	/* tmp3 = src2 offset lead over src1 */
	csel	pos, tmp1, tmp2, hi	/* Choose the maximum.  */

	sub	limit, limit, pos
	/* Compare the leading bytes up to the chosen alignment boundary. */
.Ltinycmp:
	ldrb	data1w, [src1], #1
	ldrb	data2w, [src2], #1
	subs	pos, pos, #1
	ccmp	data1w, data2w, #0, ne	/* NZCV = 0b0000: fall through as
					 * "equal" once pos hits zero.  */
	b.eq	.Ltinycmp
	cbnz	pos, 1f			/* diff occurred before the last byte */
	cmp	data1w, data2w
	b.eq	.Lstart_align
1:
	sub	result, data1, data2	/* bytes are zero-extended: sign of the
					 * difference is the compare result */
	ret

.Lstart_align:
	lsr	limit_wd, limit, #3
	cbz	limit_wd, .Lremain8	/* fewer than 8 bytes left */

	ands	xzr, src1, #7		/* flags only: is src1 aligned? */
	b.eq	.Lrecal_offset
	/* Process more leading bytes to make src1 aligned. */
	add	src1, src1, tmp3	/* advance src1 to its alignment boundary */
	add	src2, src2, tmp3
	sub	limit, limit, tmp3
	lsr	limit_wd, limit, #3
	cbz	limit_wd, .Lremain8
	/* Load 8 bytes from the now-aligned src1. */
	ldr	data1, [src1], #8
	ldr	data2, [src2], #8

	subs	limit_wd, limit_wd, #1
	eor	diff, data1, data2	/* Non-zero if differences found.  */
	csinv	endloop, diff, xzr, ne	/* all-ones when limit_wd hit zero */
	cbnz	endloop, .Lunequal_proc
	/* How far the current src2 is past its alignment boundary. */
	and	tmp3, tmp3, #7

.Lrecal_offset:				/* src1 is aligned now. */
	neg	pos, tmp3		/* negative: how far to step back */
.Lloopcmp_proc:
	/*
	 * Divide the eight bytes into two parts.  First step src2 back to
	 * its alignment boundary, load eight bytes and compare from there
	 * (these bytes overlap data already verified equal).  If all 8
	 * bytes are equal, run the second part's comparison; otherwise
	 * finish.  This special handling guarantees that all accesses stay
	 * within the thread/task address space, avoiding any out-of-range
	 * access.
	 */
	ldr	data1, [src1, pos]	/* backwards, src2-aligned load */
	ldr	data2, [src2, pos]
	eor	diff, data1, data2	/* Non-zero if differences found.  */
	cbnz	diff, .Lnot_limit

	/* The second part: the leading dword from the aligned src1. */
	ldr	data1, [src1], #8
	ldr	data2, [src2], #8
	eor	diff, data1, data2	/* Non-zero if differences found.  */
	subs	limit_wd, limit_wd, #1
	csinv	endloop, diff, xzr, ne	/* if limit_wd is 0, finish the cmp */
	cbz	endloop, .Lloopcmp_proc
.Lunequal_proc:
	cbz	diff, .Lremain8		/* limit reached with no difference */

/* A difference occurred in the latest comparison. */
.Lnot_limit:
/*
 * For little-endian, byte-reverse so the first differing (lowest-address)
 * byte moves to the MSB end; the following CLZ then counts the equal bits.
 */
CPU_LE( rev	diff, diff )
CPU_LE( rev	data1, data1 )
CPU_LE( rev	data2, data2 )

	/*
	 * The MS-non-zero bit of DIFF marks either the first bit
	 * that is different, or the end of the significant data.
	 * Shifting left now will bring the critical information into the
	 * top bits.
	 */
	clz	pos, diff
	lsl	data1, data1, pos
	lsl	data2, data2, pos
	/*
	 * We need to zero-extend (char is unsigned) the value and then
	 * perform a signed subtraction.
	 */
	lsr	data1, data1, #56
	sub	result, data1, data2, lsr #56
	ret

.Lremain8:
	/* Limit % 8 == 0 => all compared data were equal. */
	ands	limit, limit, #7
	b.eq	.Lret0

	/* Compare the trailing (or tiny, limit < 8) bytes one at a time. */
.Ltiny8proc:
	ldrb	data1w, [src1], #1
	ldrb	data2w, [src2], #1
	subs	limit, limit, #1

	ccmp	data1w, data2w, #0, ne	/* NZCV = 0b0000: exit as "equal"
					 * when limit is exhausted.  */
	b.eq	.Ltiny8proc
	sub	result, data1, data2
	ret
.Lret0:
	mov	result, #0
	ret
ENDPROC(memcmp)