diff --git a/src/xenia/base/memory.cc b/src/xenia/base/memory.cc
index 2c22c7c5b..bdc0ea7f1 100644
--- a/src/xenia/base/memory.cc
+++ b/src/xenia/base/memory.cc
@@ -26,14 +26,21 @@ void copy_128_aligned(void* dest, const void* src, size_t count) {
 #if XE_ARCH_AMD64
 void copy_and_swap_16_aligned(void* dest_ptr, const void* src_ptr,
                               size_t count) {
+  assert_zero(reinterpret_cast<uintptr_t>(src_ptr) & 0x1);
   auto dest = reinterpret_cast<uint16_t*>(dest_ptr);
   auto src = reinterpret_cast<const uint16_t*>(src_ptr);
   __m128i shufmask =
       _mm_set_epi8(0x0E, 0x0F, 0x0C, 0x0D, 0x0A, 0x0B, 0x08, 0x09, 0x06, 0x07,
                    0x04, 0x05, 0x02, 0x03, 0x00, 0x01);
 
-  size_t i;
-  for (i = 0; i + 8 <= count; i += 8) {
+  size_t i = 0;
+  size_t unaligned_words =
+      ((0x10 - (reinterpret_cast<uintptr_t>(src_ptr) & 0xF)) & 0xF) / 2;
+  for (; unaligned_words > 0 && i < count; unaligned_words--, i++) {
+    // Copy up to 16-byte alignment.
+    dest[i] = byte_swap(src[i]);
+  }
+  for (; i + 8 <= count; i += 8) {
     __m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
     __m128i output = _mm_shuffle_epi8(input, shufmask);
     _mm_store_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
@@ -64,14 +71,21 @@ void copy_and_swap_16_unaligned(void* dest_ptr, const void* src_ptr,
 
 void copy_and_swap_32_aligned(void* dest_ptr, const void* src_ptr,
                               size_t count) {
+  assert_zero(reinterpret_cast<uintptr_t>(src_ptr) & 0x3);
   auto dest = reinterpret_cast<uint32_t*>(dest_ptr);
   auto src = reinterpret_cast<const uint32_t*>(src_ptr);
   __m128i shufmask =
       _mm_set_epi8(0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x04, 0x05,
                    0x06, 0x07, 0x00, 0x01, 0x02, 0x03);
 
-  size_t i;
-  for (i = 0; i + 4 <= count; i += 4) {
+  size_t i = 0;
+  size_t unaligned_dwords =
+      ((0x10 - (reinterpret_cast<uintptr_t>(src_ptr) & 0xF)) & 0xF) / 4;
+  for (; unaligned_dwords > 0 && i < count; unaligned_dwords--, i++) {
+    // Copy up to 16-byte alignment.
+    dest[i] = byte_swap(src[i]);
+  }
+  for (; i + 4 <= count; i += 4) {
     __m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
     __m128i output = _mm_shuffle_epi8(input, shufmask);
     _mm_store_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
@@ -102,14 +116,21 @@ void copy_and_swap_32_unaligned(void* dest_ptr, const void* src_ptr,
 
 void copy_and_swap_64_aligned(void* dest_ptr, const void* src_ptr,
                               size_t count) {
+  assert_zero(reinterpret_cast<uintptr_t>(src_ptr) & 0x7);
   auto dest = reinterpret_cast<uint64_t*>(dest_ptr);
   auto src = reinterpret_cast<const uint64_t*>(src_ptr);
   __m128i shufmask =
       _mm_set_epi8(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x00, 0x01,
                    0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
 
-  size_t i;
-  for (i = 0; i + 2 <= count; i += 2) {
+  size_t i = 0;
+  size_t unaligned_qwords =
+      ((0x10 - (reinterpret_cast<uintptr_t>(src_ptr) & 0xF)) & 0xF) / 8;
+  for (; unaligned_qwords > 0 && i < count; unaligned_qwords--, i++) {
+    // Copy up to 16-byte alignment.
+    dest[i] = byte_swap(src[i]);
+  }
+  for (; i + 2 <= count; i += 2) {
     __m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
     __m128i output = _mm_shuffle_epi8(input, shufmask);
     _mm_store_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
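
Note: below is a minimal, self-contained sketch (not part of the patch) of the head-peeling pattern the 16-bit hunk applies; the 32- and 64-bit hunks are the same shape with the element size changed. Everything in it is an illustrative stand-in, not xenia API: copy_and_swap_16_sketch mirrors copy_and_swap_16_aligned, and byte_swap16 stands in for xenia's byte_swap. One deliberate difference: the sketch stores through _mm_storeu_si128, because peeling re-aligns only the source; after the head loop the destination sits on a 16-byte boundary only when it started out misaligned by the same amount as the source, so the patch's aligned store assumes that contract.

// Head-peeling byte-swap sketch. C++17; needs SSSE3
// (e.g. -mssse3 with GCC/Clang; MSVC x64 builds it as-is).
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <immintrin.h>

// Stand-in for xenia's byte_swap on 16-bit values.
static inline uint16_t byte_swap16(uint16_t v) {
  return static_cast<uint16_t>((v << 8) | (v >> 8));
}

void copy_and_swap_16_sketch(uint16_t* dest, const uint16_t* src,
                             size_t count) {
  // The scalar head can only peel whole elements, so the source must
  // at least be element-aligned.
  assert((reinterpret_cast<uintptr_t>(src) & 0x1) == 0);
  const __m128i shufmask =
      _mm_set_epi8(0x0E, 0x0F, 0x0C, 0x0D, 0x0A, 0x0B, 0x08, 0x09, 0x06, 0x07,
                   0x04, 0x05, 0x02, 0x03, 0x00, 0x01);
  size_t i = 0;
  // Head: number of words between src and the next 16-byte boundary.
  size_t unaligned_words =
      ((0x10 - (reinterpret_cast<uintptr_t>(src) & 0xF)) & 0xF) / 2;
  for (; unaligned_words > 0 && i < count; unaligned_words--, i++) {
    dest[i] = byte_swap16(src[i]);
  }
  // Body: &src[i] is now 16-byte aligned, so movdqa loads are safe.
  // The store is unaligned on purpose; see the note above.
  for (; i + 8 <= count; i += 8) {
    __m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
    _mm_storeu_si128(reinterpret_cast<__m128i*>(&dest[i]),
                     _mm_shuffle_epi8(input, shufmask));
  }
  // Tail: whatever is left after the last full vector.
  for (; i < count; i++) {
    dest[i] = byte_swap16(src[i]);
  }
}

int main() {
  alignas(16) uint16_t buf[64];
  uint16_t out[60];
  for (uint16_t j = 0; j < 64; j++) buf[j] = j;
  // Source deliberately starts 2 bytes past a 16-byte boundary, so the
  // head loop peels seven words before the vector body takes over.
  copy_and_swap_16_sketch(out, buf + 1, 60);
  for (size_t j = 0; j < 60; j++) assert(out[j] == byte_swap16(buf[j + 1]));
  puts("ok");
  return 0;
}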