/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Optimized memcpy() for ARM.
 *
 * Note that memcpy() always returns the destination pointer,
 * so we have to preserve R0.
 */
/*
 * This file has been modified from the original for use in musl libc.
 * The main changes are: addition of .type memcpy,%function to make the
 * code safely callable from thumb mode, adjusting the return
 * instructions to be compatible with pre-thumb ARM cpus, removal of
 * prefetch code that is not compatible with older cpus, and support for
 * building as thumb 2 and big-endian.
 */
.syntax unified
.global memcpy
.type memcpy,%function

memcpy:
	/* The stack must always be 64-bit aligned to be compliant with the
	 * ARM ABI. Since we have to save R0, we might as well save R4,
	 * which we can use for better pipelining of the reads below.
	 */
	stmfd	sp!, {r0, r4, lr}
	/* Making room for r5-r11, which will be spilled later */
	sub	sp, sp, #28
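	/* the frame is now 12 bytes of saved registers plus a 28-byte
	 * spill area for r5-r11, 40 bytes in total, so sp stays 8-byte
	 * aligned as the ABI requires.
	 */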
	/* it simplifies things to take care of len<4 early */
	cmp	r2, #4
	blo	copy_last_3_and_return
	/* compute the offset to align the source
	 * offset = (4-(src&3))&3 = -src & 3
	 */
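	/* checking the identity with e.g. src = 0x1003: src&3 = 3, so
	 * (4-3)&3 = 1, and -0x1003 & 3 = 0xFFFFEFFD & 3 = 1 as well;
	 * both forms give 0 when src is already word-aligned.
	 */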
	/* align source to 32 bits. We need to insert 2 instructions between
	 * a ldr[b|h] and str[b|h] because byte and half-word instructions
	 * stall 2 cycles.
	 */
	sub	r2, r2, r3		/* we know that r3 <= r2 because r2 >= 4 */
	/* see if src and dst are aligned together (congruent) */
	/* Use post-increment mode for stm to spill r5-r11 to the reserved
	 * stack frame. Don't update sp.
	 */
	stmia	sp, {r5-r11}
	/* align the destination to a cache-line */
	rsb	r3, r0, #0
	ands	r3, r3, #0x1C
	beq	congruent_aligned32
	cmp	r3, r2
	andhi	r3, r2, #0x1C
	sub	r2, r2, r3
	/* conditionally copies 0 to 7 words (length in r3) */
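	/* lsl #28 moves bit 4 of r3 into the carry flag and bit 3 into
	 * the sign flag, so the CS transfers below move 16 bytes and the
	 * MI transfers 8; r12 is just a scratch destination.
	 */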
	movs	r12, r3, lsl #28
	ldmcs	r1!, {r4, r5, r6, r7}	/* 16 bytes */
	ldmmi	r1!, {r8, r9}		/* 8 bytes */
	stmcs	r0!, {r4, r5, r6, r7}
	stmmi	r0!, {r8, r9}
	tst	r3, #0x4
	ldrne	r10, [r1], #4		/* 4 bytes */
	strne	r10, [r0], #4
congruent_aligned32:
	/*
	 * here source is aligned to 32 bytes.
	 */
	subs	r2, r2, #32
	blo	less_than_32_left
	/*
	 * We preload a cache-line up to 64 bytes ahead. On the 926, this will
	 * stall only until the requested word is fetched, but the linefill
	 * continues in the background.
	 * While the linefill is going, we write our previous cache-line
	 * into the write-buffer (which should have some free space).
	 * When the linefill is done, the writebuffer will
	 * start dumping its content into memory.
	 *
	 * While all this is going on, we then load a full cache line into
	 * 8 registers; this cache line should be in the cache by now
	 * (or partly in the cache).
	 *
	 * This code should work well regardless of the source/dest alignment.
	 */
	/* Align the preload register to a cache-line because the cpu does
	 * "critical word first" (the first word requested is loaded first).
	 */
1:	ldmia	r1!, {r4-r11}
	subs	r2, r2, #32
	/*
	 * NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
	 * for the ARM9 preload will not be safely guarded by the preceding
	 * subs. When it is safely guarded, the only way to get a SIGSEGV
	 * here is for the caller to overstate the length.
	 */
	@ ldrhi	r3, [r12], #32		/* cheap ARM9 preload */
	stmia	r0!, {r4-r11}
	bhs	1b
less_than_32_left:
	/*
	 * less than 32 bytes left at this point (length in r2)
	 */

	/* skip all this if there is nothing to do, which should
	 * be a common case (if not executed, the code below takes
	 * between 40 and 60 cycles)
	 */
	tst	r2, #0x1F
	beq	1f
	/* conditionally copies 0 to 31 bytes */
	movs	r12, r2, lsl #28
	ldmcs	r1!, {r4, r5, r6, r7}	/* 16 bytes */
	ldmmi	r1!, {r8, r9}		/* 8 bytes */
	stmcs	r0!, {r4, r5, r6, r7}
	stmmi	r0!, {r8, r9}
	movs	r12, r2, lsl #30
	ldrcs	r3, [r1], #4		/* 4 bytes */
	ldrhmi	r4, [r1], #2		/* 2 bytes */
	strcs	r3, [r0], #4
	strhmi	r4, [r0], #2
	tst	r2, #0x1
	ldrbne	r3, [r1]		/* last byte */
	strbne	r3, [r0]
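	/* the same flag trick parcels out the tail: lsl #28 exposes bit 4
	 * (16 bytes) in C and bit 3 (8 bytes) in N, lsl #30 exposes bit 2
	 * (4 bytes) in C and bit 1 (2 bytes) in N, and the final tst
	 * covers bit 0, the last odd byte.
	 */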
	/* we're done! restore everything and return */
1:	ldmfd	sp!, {r5-r11}
	ldmfd	sp!, {r0, r4, lr}
	bx	lr
/********************************************************************/
non_congruent:
	/*
	 * here source is aligned to 4 bytes
	 * but destination is not.
	 *
	 * in the code below r2 is the number of bytes read
	 * (the number of bytes written is always smaller, because we have
	 * partial words in the shift queue)
	 */
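	/* e.g. with dst one byte past a word boundary, each word stored
	 * combines three bytes of one loaded word with one byte of the
	 * next, so a partial word is always pending in the queue.
	 */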
	cmp	r2, #4
	blo	copy_last_3_and_return
	/* Use post-increment mode for stm to spill r5-r11 to the reserved
	 * stack frame. Don't update sp.
	 */
	stmia	sp, {r5-r11}
	/* compute shifts needed to align src to dest */
	rsb	r5, r0, #0
	and	r5, r5, #3		/* r5 = # bytes in partial words */
	mov	r12, r5, lsl #3		/* r12 = right shift */
	rsb	lr, r12, #32		/* lr = left shift */
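	/* e.g. r5 = 1 gives r12 = 8 and lr = 24: each output word then
	 * takes 24 bits from the current input word and 8 bits from the
	 * next one, which the specialized loops below hard-code.
	 */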
	/* read the first word */
	ldr	r3, [r1], #4
	sub	r2, r2, #4
	/* write a partial word (0 to 3 bytes), such that destination
	 * becomes aligned to 32 bits (r5 = nb of bytes to copy for alignment)
	 */
	movs	r12, r5, lsl #31
	movmi	r3, r3, ror #24
	strbmi	r3, [r0], #1
	movcs	r3, r3, ror #24
	strbcs	r3, [r0], #1
	movcs	r3, r3, ror #24
	strbcs	r3, [r0], #1
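	/* big-endian flavour of the store: ror #24 rotates the most
	 * significant (first in memory) byte of r3 down into the low
	 * byte for each strb; a little-endian build would store first
	 * and then shift right by 8 instead.
	 */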
	cmp	r2, #4
	blo	partial_word_tail
	/* Align destination to 32 bytes (cache line boundary) */
	blo	partial_word_tail
	/* copy 32 bytes at a time */
	subs	r2, r2, #32
	blo	less_than_thirtytwo
	/* Use immediate mode for the shifts, because there is an extra cycle
	 * for register shifts, which could account for up to 50% of
	 * execution time.
	 */
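	/* the three possible offsets (r12 = 8, 16 or 24 bits) get three
	 * specialized copies of the 32-bytes-per-iteration loop below;
	 * within each loop, the first register ladder appears to be the
	 * big-endian variant and the second its little-endian mirror.
	 */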
	ldmia	r1!, {r5,r6,r7,r8,r9,r10,r11}
	mov	r3, r3, lsl #16
	orr	r3, r3, r4, lsr #16
	mov	r4, r4, lsl #16
	orr	r4, r4, r5, lsr #16
	mov	r5, r5, lsl #16
	orr	r5, r5, r6, lsr #16
	mov	r6, r6, lsl #16
	orr	r6, r6, r7, lsr #16
	mov	r7, r7, lsl #16
	orr	r7, r7, r8, lsr #16
	mov	r8, r8, lsl #16
	orr	r8, r8, r9, lsr #16
	mov	r9, r9, lsl #16
	orr	r9, r9, r10, lsr #16
	mov	r10, r10, lsl #16
	orr	r10, r10, r11, lsr #16
	stmia	r0!, {r3,r4,r5,r6,r7,r8,r9,r10}
	mov	r3, r3, lsr #16
	orr	r3, r3, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r6, lsl #16
	mov	r6, r6, lsr #16
	orr	r6, r6, r7, lsl #16
	mov	r7, r7, lsr #16
	orr	r7, r7, r8, lsl #16
	mov	r8, r8, lsr #16
	orr	r8, r8, r9, lsl #16
	mov	r9, r9, lsr #16
	orr	r9, r9, r10, lsl #16
	mov	r10, r10, lsr #16
	orr	r10, r10, r11, lsl #16
	stmia	r0!, {r3,r4,r5,r6,r7,r8,r9,r10}
	b	less_than_thirtytwo
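	/* same 32-byte loop, specialized for a one-byte (8-bit) offset
	 * between source and destination words
	 */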
	ldmia	r1!, {r5,r6,r7,r8,r9,r10,r11}
	mov	r3, r3, lsl #8
	orr	r3, r3, r4, lsr #24
	mov	r4, r4, lsl #8
	orr	r4, r4, r5, lsr #24
	mov	r5, r5, lsl #8
	orr	r5, r5, r6, lsr #24
	mov	r6, r6, lsl #8
	orr	r6, r6, r7, lsr #24
	mov	r7, r7, lsl #8
	orr	r7, r7, r8, lsr #24
	mov	r8, r8, lsl #8
	orr	r8, r8, r9, lsr #24
	mov	r9, r9, lsl #8
	orr	r9, r9, r10, lsr #24
	mov	r10, r10, lsl #8
	orr	r10, r10, r11, lsr #24
	stmia	r0!, {r3,r4,r5,r6,r7,r8,r9,r10}
	mov	r3, r3, lsr #8
	orr	r3, r3, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r6, lsl #24
	mov	r6, r6, lsr #8
	orr	r6, r6, r7, lsl #24
	mov	r7, r7, lsr #8
	orr	r7, r7, r8, lsl #24
	mov	r8, r8, lsr #8
	orr	r8, r8, r9, lsl #24
	mov	r9, r9, lsr #8
	orr	r9, r9, r10, lsl #24
	mov	r10, r10, lsr #8
	orr	r10, r10, r11, lsl #24
	stmia	r0!, {r3,r4,r5,r6,r7,r8,r9,r10}
	b	less_than_thirtytwo
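	/* and the 24-bit case: three bytes of every output word come
	 * from the next input word
	 */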
	ldmia	r1!, {r5,r6,r7,r8,r9,r10,r11}
	mov	r3, r3, lsl #24
	orr	r3, r3, r4, lsr #8
	mov	r4, r4, lsl #24
	orr	r4, r4, r5, lsr #8
	mov	r5, r5, lsl #24
	orr	r5, r5, r6, lsr #8
	mov	r6, r6, lsl #24
	orr	r6, r6, r7, lsr #8
	mov	r7, r7, lsl #24
	orr	r7, r7, r8, lsr #8
	mov	r8, r8, lsl #24
	orr	r8, r8, r9, lsr #8
	mov	r9, r9, lsl #24
	orr	r9, r9, r10, lsr #8
	mov	r10, r10, lsl #24
	orr	r10, r10, r11, lsr #8
	stmia	r0!, {r3,r4,r5,r6,r7,r8,r9,r10}
	mov	r3, r3, lsr #24
	orr	r3, r3, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r6, lsl #8
	mov	r6, r6, lsr #24
	orr	r6, r6, r7, lsl #8
	mov	r7, r7, lsr #24
	orr	r7, r7, r8, lsl #8
	mov	r8, r8, lsr #24
	orr	r8, r8, r9, lsl #8
	mov	r9, r9, lsr #24
	orr	r9, r9, r10, lsl #8
	mov	r10, r10, lsr #24
	orr	r10, r10, r11, lsl #8
	stmia	r0!, {r3,r4,r5,r6,r7,r8,r9,r10}
less_than_thirtytwo:
	/* copy the last 0 to 31 bytes of the source */
	rsb	r12, lr, #32		/* we corrupted r12, recompute it */
	add	r2, r2, #32
	cmp	r2, #4
	blo	partial_word_tail
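	/* note on the rsb above: the loops reused r12 as scratch, but lr
	 * still holds the left-shift amount, so the right-shift amount
	 * is simply recomputed as 32 - lr.
	 */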
partial_word_tail:
	/* we have a partial word in the input buffer */
	movs	r5, lr, lsl #(31-3)
	movmi	r3, r3, ror #24
	strbmi	r3, [r0], #1
	movcs	r3, r3, ror #24
	strbcs	r3, [r0], #1
	movcs	r3, r3, ror #24
	strbcs	r3, [r0], #1
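	/* lr/8 bytes remain queued in r3: lsl #(31-3) moves bit 4 of lr
	 * into C and bit 3 into N, so the CS pair stores two bytes and
	 * MI one more (lr = 8, 16 or 24 stores 1, 2 or 3 bytes).
	 */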
	/* Refill spilled registers from the stack. Don't update sp. */
	ldmfd	sp, {r5-r11}
copy_last_3_and_return:
	movs	r2, r2, lsl #31		/* copy remaining 0, 1, 2 or 3 bytes */
	ldrbmi	r2, [r1], #1
	ldrbcs	r3, [r1], #1
	ldrbcs	r12, [r1]
	strbmi	r2, [r0], #1
	strbcs	r3, [r0], #1
	strbcs	r12, [r0]
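	/* lsl #31 leaves bit 1 of the remaining length in C and bit 0 in
	 * N, so the CS pair copies two bytes and the MI pair one byte.
	 */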
	/* we're done! restore sp and spilled registers and return */
	add	sp, sp, #28
	ldmfd	sp!, {r0, r4, lr}
	bx	lr