mirror of
https://github.com/fail0verflow/switch-coreboot.git
synced 2025-05-04 01:39:18 -04:00
This does not optimize memcpy for 64bit, it merely makes it compile.

Change-Id: I69ad6bd0c3d5f617d9222643abf7a2ba7c2a0359
Signed-off-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Signed-off-by: Scott Duplichan <scott@notabs.org>
Reviewed-on: http://review.coreboot.org/10575
Tested-by: build bot (Jenkins)
Reviewed-by: Marc Jones <marc.jones@se-eng.com>
22 lines
378 B
C
22 lines
378 B
C
#include <string.h>
|
|
|
|
/*
 * memcpy - copy n bytes from src to dest using x86 string instructions.
 *
 * Copies n >> 2 dwords with "rep movs", then the remaining 0..3 bytes
 * with "rep movsb", so exactly n bytes are transferred in total.
 * Standard memcpy contract: the regions must not overlap (the copy is
 * strictly forward, low address to high).
 *
 * Returns dest, as required by the C standard.
 */
void *memcpy(void *dest, const void *src, size_t n)
{
	/*
	 * Dummy outputs: they receive the post-copy values of rcx/ecx,
	 * rdi/edi and rsi/esi ("=&c", "=&D", "=&S"), which tells GCC those
	 * registers are clobbered.  The values themselves are never read.
	 */
	unsigned long d0, d1, d2;

	asm volatile(
#ifdef __x86_64__
		/*
		 * 64-bit path: still a dword-at-a-time copy.  Per the
		 * originating commit, this does not optimize memcpy for
		 * 64 bit (e.g. no movsq); it merely makes it compile.
		 */
		"rep ; movsd\n\t"	/* copy n >> 2 dwords; count was preloaded into rcx via "0" */
		"mov %4,%%rcx\n\t"	/* load tail byte count (n & 3) into rcx */
#else
		"rep ; movsl\n\t"	/* copy n >> 2 dwords; count was preloaded into ecx via "0" */
		"movl %4,%%ecx\n\t"	/* load tail byte count (n & 3) into ecx */
#endif
		"rep ; movsb\n\t"	/* copy the remaining 0..3 bytes */
		/* outputs: early-clobbered count and pointer registers (see d0..d2 above) */
		: "=&c" (d0), "=&D" (d1), "=&S" (d2)
		/* inputs: "0"/"1"/"2" tie the dword count, dest and src to the
		   same registers as outputs 0-2; %4 ("g") is the byte tail. */
		: "0" (n >> 2), "g" (n & 3), "1" (dest), "2" (src)
		: "memory"		/* the asm writes memory GCC cannot see through the operands */
	);

	return dest;
}
|