i965/tiled_memcpy: Unroll bytes==64 case.

Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Matt Turner 2016-04-11 11:59:59 -07:00
parent 0e605d9b3a
commit eafeb8db66

@@ -149,6 +149,14 @@ rgba8_copy_aligned_dst(void *dst, const void *src, size_t bytes)
    assert(bytes == 0 || !(((uintptr_t)dst) & 0xf));
 
 #if defined(__SSSE3__) || defined(__SSE2__)
+   if (bytes == 64) {
+      rgba8_copy_16_aligned_dst(dst +  0, src +  0);
+      rgba8_copy_16_aligned_dst(dst + 16, src + 16);
+      rgba8_copy_16_aligned_dst(dst + 32, src + 32);
+      rgba8_copy_16_aligned_dst(dst + 48, src + 48);
+      return dst;
+   }
+
    while (bytes >= 16) {
       rgba8_copy_16_aligned_dst(dst, src);
       src += 16;
@@ -171,6 +179,14 @@ rgba8_copy_aligned_src(void *dst, const void *src, size_t bytes)
    assert(bytes == 0 || !(((uintptr_t)src) & 0xf));
 
 #if defined(__SSSE3__) || defined(__SSE2__)
+   if (bytes == 64) {
+      rgba8_copy_16_aligned_src(dst +  0, src +  0);
+      rgba8_copy_16_aligned_src(dst + 16, src + 16);
+      rgba8_copy_16_aligned_src(dst + 32, src + 32);
+      rgba8_copy_16_aligned_src(dst + 48, src + 48);
+      return dst;
+   }
+
    while (bytes >= 16) {
       rgba8_copy_16_aligned_src(dst, src);
       src += 16;
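
For context, here is a minimal standalone sketch of the pattern the diff adds, not the Mesa helpers themselves: a 16-byte SSE2 copy routine plus a caller that special-cases bytes == 64 by fully unrolling four 16-byte copies before falling back to the general loop. The names copy_16_aligned_dst and copy_aligned_dst are hypothetical stand-ins for the rgba8_copy_* functions, and the memcpy tail is an assumption added for completeness.

/* Illustrative sketch only -- not the Mesa i965 code. Requires SSE2 and
 * GCC/Clang (relies on the void-pointer arithmetic extension, as the
 * Mesa code does). */
#include <assert.h>
#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: copy 16 bytes; dst is assumed 16-byte aligned. */
static inline void
copy_16_aligned_dst(void *dst, const void *src)
{
   __m128i reg = _mm_loadu_si128((const __m128i *)src); /* unaligned load */
   _mm_store_si128((__m128i *)dst, reg);                /* aligned store  */
}

static inline void *
copy_aligned_dst(void *dst, const void *src, size_t bytes)
{
   void *ret = dst;

   assert(bytes == 0 || !(((uintptr_t)dst) & 0xf));

   /* Fast path matching the diff: a 64-byte span is exactly four
    * 16-byte copies, so unroll them and skip the loop overhead. */
   if (bytes == 64) {
      copy_16_aligned_dst(dst +  0, src +  0);
      copy_16_aligned_dst(dst + 16, src + 16);
      copy_16_aligned_dst(dst + 32, src + 32);
      copy_16_aligned_dst(dst + 48, src + 48);
      return ret;
   }

   /* General path: 16 bytes at a time, then any remaining tail. */
   while (bytes >= 16) {
      copy_16_aligned_dst(dst, src);
      src += 16;
      dst += 16;
      bytes -= 16;
   }
   if (bytes)
      memcpy(dst, src, bytes);

   return ret;
}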