/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Optimized memcpy() for ARM.
 *
 * note that memcpy() always returns the destination pointer,
 * so we have to preserve R0.
 */
/*
 * This file has been modified from the original for use in musl libc.
 * The main changes are: addition of .type memcpy,%function to make the
 * code safely callable from thumb mode, adjusting the return
 * instructions to be compatible with pre-thumb ARM cpus, and removal
 * of prefetch code that is not compatible with older cpus.
 */
.global memcpy
.type memcpy,%function
memcpy:
	/* The stack must always be 64-bit aligned to be compliant with the
	 * ARM ABI. Since we have to save R0, we might as well save R4,
	 * which we can use for better pipelining of the reads below.
	 */
	stmfd	sp!, {r0, r4, lr}
	/* Making room for r5-r11 which will be spilled later */
	sub	sp, sp, #28
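	/* Frame-size arithmetic: the stmfd above pushed 12 bytes (r0, r4, lr)
	 * and we reserve 28 more here, 40 bytes in total; 40 is a multiple
	 * of 8, so the 64-bit stack alignment noted above is preserved. */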
	/* it simplifies things to take care of len<4 early */
	cmp	r2, #4
	blo	copy_last_3_and_return
	/* compute the offset to align the source
	 * offset = (4-(src&3))&3 = -src & 3
	 */
	rsb	r3, r1, #0
	ands	r3, r3, #3
	beq	src_aligned
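	/* Worked example of the identity used above: for src & 3 == 1,
	 * (4 - 1) & 3 = 3 and (-src) & 3 = 3, i.e. three bytes must be
	 * copied before the source pointer becomes word-aligned; for
	 * src & 3 == 0 both forms give 0, and the beq skips the fixup. */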
	/* align source to 32 bits. We need to insert 2 instructions between
	 * a ldr[b|h] and str[b|h] because byte and half-word instructions
	 * stall 2 cycles.
	 */
	movs	r12, r3, lsl #31
	sub	r2, r2, r3		/* we know that r3 <= r2 because r2 >= 4 */
	ldrmib	r3, [r1], #1
	ldrcsb	r4, [r1], #1
	ldrcsb	r12, [r1], #1
	strmib	r3, [r0], #1
	strcsb	r4, [r0], #1
	strcsb	r12, [r0], #1

src_aligned:
	/* see if src and dst are aligned together (congruent) */
	eor	r12, r0, r1
	tst	r12, #3
	bne	non_congruent
	/* Use post-increment mode for stm to spill r5-r11 to reserved stack
	 * frame. Don't update sp.
	 */
	stmea	sp, {r5-r11}
	/* align the destination to a cache-line */
	rsb	r3, r0, #0
	ands	r3, r3, #0x1C
	beq	congruent_aligned32
	cmp	r3, r2
	andhi	r3, r2, #0x1C
	/* conditionally copies 0 to 7 words (length in r3) */
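	/* How the movs/lsl #28 trick below works: shifting the byte count
	 * left by 28 leaves bit 4 (16 bytes) in the carry flag and bit 3
	 * (8 bytes) in the negative flag, so the cs transfers move 16 bytes
	 * and the mi transfers move 8; bit 2 (4 bytes) is tested separately
	 * with tst. */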
	movs	r12, r3, lsl #28
	ldmcsia	r1!, {r4, r5, r6, r7}	/* 16 bytes */
	ldmmiia	r1!, {r8, r9}		/*  8 bytes */
	stmcsia	r0!, {r4, r5, r6, r7}
	stmmiia	r0!, {r8, r9}
	tst	r3, #0x4
	ldrne	r10, [r1], #4		/*  4 bytes */
	strne	r10, [r0], #4
	sub	r2, r2, r3
congruent_aligned32:
	/*
	 * here source is aligned to 32 bytes.
	 */
	subs	r2, r2, #32
	blo	less_than_32_left
	/*
	 * We preload a cache-line up to 64 bytes ahead. On the 926, this will
	 * stall only until the requested word is fetched, but the linefill
	 * continues in the background.
	 * While the linefill is going, we write our previous cache-line
	 * into the write-buffer (which should have some free space).
	 * When the linefill is done, the write-buffer will
	 * start dumping its content into memory.
	 *
	 * While all this is going on, we then load a full cache line into
	 * 8 registers; this cache line should be in the cache by now
	 * (or partly in the cache).
	 *
	 * This code should work well regardless of the source/dest alignment.
	 */
	/* Align the preload register to a cache-line because the cpu does
	 * "critical word first" (the first word requested is loaded first).
	 */
	@ bic	r12, r1, #0x1F
	@ add	r12, r12, #64
1:	ldmia	r1!, {r4-r11}
	subs	r2, r2, #32
	/*
	 * NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
	 * for ARM9 preload will not be safely guarded by the preceding subs.
	 * When it is safely guarded, the only way to get a SIGSEGV here
	 * is for the caller to overstate the length.
	 */
	@ ldrhi	r3, [r12], #32		/* cheap ARM9 preload */
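	/* The preload above is kept only as a comment: per the header note,
	 * the musl port removed the prefetch code because it is not
	 * compatible with older cpus. The hi condition ties it to the
	 * preceding subs, so it would only run while more than one full
	 * 32-byte block remains. */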
	stmia	r0!, {r4-r11}
	bhs	1b

	add	r2, r2, #32
less_than_32_left:
	/*
	 * less than 32 bytes left at this point (length in r2)
	 */
	/* skip all this if there is nothing to do, which should
	 * be a common case (if not executed the code below takes
	 * about 16 cycles)
	 */
	tst	r2, #0x1F
	beq	1f

	/* conditionally copies 0 to 31 bytes */
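	/* Same flag trick as in the congruent path: lsl #28 puts bit 4
	 * (16 bytes) in carry and bit 3 (8 bytes) in negative; the second
	 * movs with lsl #30 then puts bit 2 (4 bytes) in carry and bit 1
	 * (2 bytes) in negative, and the final odd byte is handled with
	 * tst r2, #1. */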
	movs	r12, r2, lsl #28
	ldmcsia	r1!, {r4, r5, r6, r7}	/* 16 bytes */
	ldmmiia	r1!, {r8, r9}		/*  8 bytes */
	stmcsia	r0!, {r4, r5, r6, r7}
	stmmiia	r0!, {r8, r9}
	movs	r12, r2, lsl #30
	ldrcs	r3, [r1], #4		/*  4 bytes */
	ldrmih	r4, [r1], #2		/*  2 bytes */
	strcs	r3, [r0], #4
	strmih	r4, [r0], #2
	tst	r2, #0x1
	ldrneb	r3, [r1]		/*  last byte  */
	strneb	r3, [r0]
	/* we're done! restore everything and return */
1:	ldmfd	sp!, {r5-r11}
	ldmfd	sp!, {r0, r4, lr}
	/********************************************************************/
non_congruent:
	/*
	 * here source is aligned to 4 bytes
	 * but destination is not.
	 *
	 * in the code below r2 is the number of bytes read
	 * (the number of bytes written is always smaller, because we have
	 * partial words in the shift queue)
	 */
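	/* A word-level sketch of the shift queue used on this path (C-like
	 * pseudocode, little-endian; "right" and "left" are the shift
	 * amounts computed below):
	 *
	 *     queue = *src++;               // first aligned source word
	 *     store low bytes of queue until dst is word-aligned;
	 *     queue >>= right;
	 *     while (enough bytes left) {
	 *         next = *src++;
	 *         *dst++ = queue | (next << left);
	 *         queue = next >> right;
	 *     }
	 *
	 * Each output word combines the leftover bytes of the previous
	 * source word with the low bytes of the next one. */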
	cmp	r2, #4
	blo	copy_last_3_and_return
	/* Use post-increment mode for stm to spill r5-r11 to reserved stack
	 * frame. Don't update sp.
	 */
	stmea	sp, {r5-r11}
	/* compute shifts needed to align src to dest */
	rsb	r5, r0, #0
	and	r5, r5, #3		/* r5 = # bytes in partial words */
	mov	r12, r5, lsl #3		/* r12 = right */
	rsb	lr, r12, #32		/* lr = left */
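	/* Example: if dst & 3 == 1, then r5 = 3 (three bytes are written
	 * to align the destination), right = 24 and left = 8: each output
	 * word keeps the one leftover byte of the previous source word and
	 * takes its top three bytes from the next word. */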
	/* read the first word */
	ldr	r3, [r1], #4
	sub	r2, r2, #4
	/* write a partial word (0 to 3 bytes), such that destination
	 * becomes aligned to 32 bits (r5 = nb of bytes to copy for alignment)
	 */
	movs	r5, r5, lsl #31
	strmib	r3, [r0], #1
	movmi	r3, r3, lsr #8
	strcsb	r3, [r0], #1
	movcs	r3, r3, lsr #8
	strcsb	r3, [r0], #1
	movcs	r3, r3, lsr #8

	cmp	r2, #4
	blo	partial_word_tail
	/* Align destination to 32 bytes (cache line boundary) */
1:	tst	r0, #0x1C
	beq	2f
	ldr	r5, [r1], #4
	sub	r2, r2, #4
	orr	r4, r3, r5, lsl lr
	mov	r3, r5, lsr r12
	str	r4, [r0], #4
	cmp	r2, #4
	bhs	1b
	blo	partial_word_tail
	/* copy 32 bytes at a time */
2:	subs	r2, r2, #32
	blo	less_than_thirtytwo
	/* Use immediate mode for the shifts, because there is an extra cycle
	 * for register shifts, which could account for up to 50% of
	 * execution time.
	 */
	cmp	r12, #24
	beq	loop24
	cmp	r12, #8
	beq	loop8
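	/* Three copies of the combining loop follow, one per shift amount
	 * (right = 16, 8 or 24), so the shifts can use immediate operands;
	 * each iteration reads eight source words, merges them with the
	 * partial word carried in r3, and stores eight destination words. */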
loop16:
	ldr	r12, [r1], #4
1:	mov	r4, r12
	ldmia	r1!, {r5, r6, r7, r8, r9, r10, r11}
	subs	r2, r2, #32
	ldrhs	r12, [r1], #4
	orr	r3, r3, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r6, lsl #16
	mov	r6, r6, lsr #16
	orr	r6, r6, r7, lsl #16
	mov	r7, r7, lsr #16
	orr	r7, r7, r8, lsl #16
	mov	r8, r8, lsr #16
	orr	r8, r8, r9, lsl #16
	mov	r9, r9, lsr #16
	orr	r9, r9, r10, lsl #16
	mov	r10, r10, lsr #16
	orr	r10, r10, r11, lsl #16
	stmia	r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
	mov	r3, r11, lsr #16
	bhs	1b
	b	less_than_thirtytwo
loop8:
	ldr	r12, [r1], #4
1:	mov	r4, r12
	ldmia	r1!, {r5, r6, r7, r8, r9, r10, r11}
	subs	r2, r2, #32
	ldrhs	r12, [r1], #4
	orr	r3, r3, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r6, lsl #24
	mov	r6, r6, lsr #8
	orr	r6, r6, r7, lsl #24
	mov	r7, r7, lsr #8
	orr	r7, r7, r8, lsl #24
	mov	r8, r8, lsr #8
	orr	r8, r8, r9, lsl #24
	mov	r9, r9, lsr #8
	orr	r9, r9, r10, lsl #24
	mov	r10, r10, lsr #8
	orr	r10, r10, r11, lsl #24
	stmia	r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
	mov	r3, r11, lsr #8
	bhs	1b
	b	less_than_thirtytwo
loop24:
	ldr	r12, [r1], #4
1:	mov	r4, r12
	ldmia	r1!, {r5, r6, r7, r8, r9, r10, r11}
	subs	r2, r2, #32
	ldrhs	r12, [r1], #4
	orr	r3, r3, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r6, lsl #8
	mov	r6, r6, lsr #24
	orr	r6, r6, r7, lsl #8
	mov	r7, r7, lsr #24
	orr	r7, r7, r8, lsl #8
	mov	r8, r8, lsr #24
	orr	r8, r8, r9, lsl #8
	mov	r9, r9, lsr #24
	orr	r9, r9, r10, lsl #8
	mov	r10, r10, lsr #24
	orr	r10, r10, r11, lsl #8
	stmia	r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
	mov	r3, r11, lsr #24
	bhs	1b
less_than_thirtytwo:
	/* copy the last 0 to 31 bytes of the source */
	rsb	r12, lr, #32		/* we corrupted r12, recompute it */
	add	r2, r2, #32
	cmp	r2, #4
	blo	partial_word_tail
1:	ldr	r5, [r1], #4
	sub	r2, r2, #4
	orr	r4, r3, r5, lsl lr
	mov	r3, r5, lsr r12
	str	r4, [r0], #4
	cmp	r2, #4
	bhs	1b
partial_word_tail:
	/* we have a partial word in the input buffer */
	movs	r5, lr, lsl #(31-3)
	strmib	r3, [r0], #1
	movmi	r3, r3, lsr #8
	strcsb	r3, [r0], #1
	movcs	r3, r3, lsr #8
	strcsb	r3, [r0], #1
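	/* Flag decoding for the tail above: lr is 8 times the number of
	 * bytes still queued in r3 (8, 16 or 24). lsl #(31-3) = lsl #28
	 * moves bit 3 of lr into the negative flag and bit 4 into carry,
	 * so the mi store emits one byte and the pair of cs stores emits
	 * two more, covering the 1-, 2- and 3-byte cases. */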
	/* Refill spilled registers from the stack. Don't update sp. */
	ldmfd	sp, {r5-r11}
copy_last_3_and_return:
	movs	r2, r2, lsl #31		/* copy remaining 0, 1, 2 or 3 bytes */
	ldrmib	r2, [r1], #1
	ldrcsb	r3, [r1], #1
	ldrcsb	r12, [r1]
	strmib	r2, [r0], #1
	strcsb	r3, [r0], #1
	strcsb	r12, [r0]
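	/* Same single-movs decoding as above: lsl #31 moves bit 0 of the
	 * remaining length into the negative flag and bit 1 into carry, so
	 * the mi pair moves one byte and the cs pairs move two, handling
	 * all lengths from 0 to 3 without a branch. */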
	/* we're done! restore sp and spilled registers and return */
	add	sp, sp, #28
	ldmfd	sp!, {r0, r4, lr}