From c1c2866c1ba11f165de797f4402e016b27eb3496 Mon Sep 17 00:00:00 2001 From: "gregory.hainaut@gmail.com" Date: Fri, 8 Oct 2010 21:31:13 +0000 Subject: [PATCH] zzogl-pg: * Finish to convert ASM to intrinsic * Force the pointer outsides of the screen in fullscreen * do not compile useless files git-svn-id: http://pcsx2.googlecode.com/svn/trunk@3903 96395faa-99c1-11dd-bbfe-3dabce05a288 --- plugins/zzogl-pg/opengl/CMakeLists.txt | 4 +- plugins/zzogl-pg/opengl/GLWinX11.cpp | 3 +- plugins/zzogl-pg/opengl/Mem_Swizzle.cpp | 390 ++++++++++++++++++++++++ plugins/zzogl-pg/opengl/targets.cpp | 28 +- 4 files changed, 408 insertions(+), 17 deletions(-) diff --git a/plugins/zzogl-pg/opengl/CMakeLists.txt b/plugins/zzogl-pg/opengl/CMakeLists.txt index 1f83451cb4..701371c634 100644 --- a/plugins/zzogl-pg/opengl/CMakeLists.txt +++ b/plugins/zzogl-pg/opengl/CMakeLists.txt @@ -52,7 +52,7 @@ set(zzoglSources GSmain.cpp HostMemory.cpp Mem.cpp - memcpy_amd.cpp + # memcpy_amd.cpp Mem_Swizzle.cpp Mem_Tables.cpp Profile.cpp @@ -79,7 +79,7 @@ set(zzoglHeaders common.h CRC.h GifTransfer.h - glprocs.h + # glprocs.h GS.h Mem.h Mem_Swizzle.h diff --git a/plugins/zzogl-pg/opengl/GLWinX11.cpp b/plugins/zzogl-pg/opengl/GLWinX11.cpp index 39d8108ae5..9fb50807eb 100644 --- a/plugins/zzogl-pg/opengl/GLWinX11.cpp +++ b/plugins/zzogl-pg/opengl/GLWinX11.cpp @@ -242,8 +242,9 @@ void GLWindow::ToggleFullscreen() } // Hide the cursor in the right bottom corner + // Note: Use big value instead of width/height to be sure it is really out of the screen if(fullScreen) - XWarpPointer(glDisplay, None, glWindow, 0, 0, 0, 0, width, height); + XWarpPointer(glDisplay, None, glWindow, 0, 0, 0, 0, 4000, 2000); } diff --git a/plugins/zzogl-pg/opengl/Mem_Swizzle.cpp b/plugins/zzogl-pg/opengl/Mem_Swizzle.cpp index fb3cd17dd0..e97ba18da9 100644 --- a/plugins/zzogl-pg/opengl/Mem_Swizzle.cpp +++ b/plugins/zzogl-pg/opengl/Mem_Swizzle.cpp @@ -20,48 +20,437 @@ #include "GS.h" #include "Mem.h" #include "Mem_Swizzle.h" +#ifdef ZEROGS_SSE2 +#include +#endif + +// Current port of the ASM function to intrinsic +#define INTRINSIC_PORT_32 +#define INTRINSIC_PORT_16 +#define INTRINSIC_PORT_8 +#define INTRINSIC_PORT_4 +#ifdef ZEROGS_SSE2 +template +__forceinline void SwizzleBlock32_sse2_I(u8 *dst, u8 *src, int pitch, u32 WriteMask) +{ + __m128i src_0; + __m128i src_1; + __m128i src_2; + __m128i src_3; + + if (WriteMask == 0xffffffff) { + for (int i=3 ; i >= 0 ; --i) { + // load + if (aligned) { + src_0 = _mm_load_si128((__m128i*)src); // 5 4 1 0 + src_1 = _mm_load_si128((__m128i*)(src+16)); // 13 12 9 8 + src_2 = _mm_load_si128((__m128i*)(src+pitch)); // 7 6 3 2 + src_3 = _mm_load_si128((__m128i*)(src+16+pitch)); // 15 14 11 10 + } else { + src_0 = _mm_loadu_si128((__m128i*)src); // 5 4 1 0 + src_1 = _mm_loadu_si128((__m128i*)(src+16)); // 13 12 9 8 + src_2 = _mm_loadu_si128((__m128i*)(src+pitch)); // 7 6 3 2 + src_3 = _mm_loadu_si128((__m128i*)(src+16+pitch)); // 15 14 11 10 + } + + // Reorder + __m128i dst_0 = _mm_unpacklo_epi64(src_0, src_2); // 3 2 1 0 + __m128i dst_1 = _mm_unpackhi_epi64(src_0, src_2); // 7 6 5 4 + __m128i dst_2 = _mm_unpacklo_epi64(src_1, src_3); // 11 10 9 8 + __m128i dst_3 = _mm_unpackhi_epi64(src_1, src_3); // 15 14 13 12 + + // store + _mm_stream_si128((__m128i*)dst, dst_0); + _mm_stream_si128(((__m128i*)dst)+1, dst_1); + _mm_stream_si128(((__m128i*)dst)+2, dst_2); + _mm_stream_si128(((__m128i*)dst)+3, dst_3); + + // update the pointer + dst += 64; + src += 2*pitch; + } + } + else + { + // Build the mask (tranform a u32 to a 4 packets 
u32) + __m128i mask = _mm_cvtsi32_si128(WriteMask); + mask = _mm_shuffle_epi32(mask, 0); + + for (int i=3 ; i >= 0 ; --i) { + // load + if (aligned) { + src_0 = _mm_load_si128((__m128i*)src); // 5 4 1 0 + src_1 = _mm_load_si128((__m128i*)(src+16)); // 13 12 9 8 + src_2 = _mm_load_si128((__m128i*)(src+pitch)); // 7 6 3 2 + src_3 = _mm_load_si128((__m128i*)(src+16+pitch)); // 15 14 11 10 + } else { + src_0 = _mm_loadu_si128((__m128i*)src); // 5 4 1 0 + src_1 = _mm_loadu_si128((__m128i*)(src+16)); // 13 12 9 8 + src_2 = _mm_loadu_si128((__m128i*)(src+pitch)); // 7 6 3 2 + src_3 = _mm_loadu_si128((__m128i*)(src+16+pitch)); // 15 14 11 10 + } + + // Apply the WriteMask before reordering + src_0 = _mm_and_si128(src_0, mask); + src_1 = _mm_and_si128(src_1, mask); + src_2 = _mm_and_si128(src_2, mask); + src_3 = _mm_and_si128(src_3, mask); + + // Reorder + __m128i dst_0 = _mm_unpacklo_epi64(src_0, src_2); // 3 2 1 0 + __m128i dst_1 = _mm_unpackhi_epi64(src_0, src_2); // 7 6 5 4 + __m128i dst_2 = _mm_unpacklo_epi64(src_1, src_3); // 11 10 9 8 + __m128i dst_3 = _mm_unpackhi_epi64(src_1, src_3); // 15 14 13 12 + + // Load previous value and apply the ~mask + __m128i old_dst_0 = _mm_andnot_si128(mask, _mm_load_si128((__m128i*)dst)); + __m128i old_dst_1 = _mm_andnot_si128(mask, _mm_load_si128(((__m128i*)dst)+1)); + __m128i old_dst_2 = _mm_andnot_si128(mask, _mm_load_si128(((__m128i*)dst)+2)); + __m128i old_dst_3 = _mm_andnot_si128(mask, _mm_load_si128(((__m128i*)dst)+3)); + + // Build the final value + dst_0 = _mm_or_si128(dst_0, old_dst_0); + dst_1 = _mm_or_si128(dst_1, old_dst_1); + dst_2 = _mm_or_si128(dst_2, old_dst_2); + dst_3 = _mm_or_si128(dst_3, old_dst_3); + + // store + _mm_stream_si128((__m128i*)dst, dst_0); + _mm_stream_si128(((__m128i*)dst)+1, dst_1); + _mm_stream_si128(((__m128i*)dst)+2, dst_2); + _mm_stream_si128(((__m128i*)dst)+3, dst_3); + + // update the pointer + dst += 64; + src += 2*pitch; + } + } + // FIXME normally you must use a sfence but it would impact perf to do here + // the function is in a loop and it would have a better place after the loop... 
+} + +template +__forceinline void SwizzleBlock16_sse2_I(u8 *dst, u8 *src, int pitch) +{ + __m128i src_0_L; + __m128i src_0_H; + __m128i src_2_L; + __m128i src_2_H; + + for (int i=3 ; i >= 0 ; --i) { + // load + if (aligned) { + src_0_L = _mm_load_si128((__m128i*)src); // 13L 12L 9L 8L 5L 4L 1L 0L + src_0_H = _mm_load_si128((__m128i*)(src+16)); // 13H 12H 9H 8H 5H 4H 1H 0H + src_2_L = _mm_load_si128((__m128i*)(src+pitch)); // 15L 14L 11L 10L 7L 6L 3L 2L + src_2_H = _mm_load_si128((__m128i*)(src+16+pitch)); // 15H 14H 11H 10H 7H 6H 3H 2H + } else { + src_0_L = _mm_loadu_si128((__m128i*)src); // 13L 12L 9L 8L 5L 4L 1L 0L + src_0_H = _mm_loadu_si128((__m128i*)(src+16)); // 13H 12H 9H 8H 5H 4H 1H 0H + src_2_L = _mm_loadu_si128((__m128i*)(src+pitch)); // 15L 14L 11L 10L 7L 6L 3L 2L + src_2_H = _mm_loadu_si128((__m128i*)(src+16+pitch)); // 15H 14H 11H 10H 7H 6H 3H 2H + } + + // Interleave L and H to obtains 32 bits packets + __m128i dst_0_tmp = _mm_unpacklo_epi16(src_0_L, src_0_H); // 5H 5L 4H 4L 1H 1L 0H 0L + __m128i dst_1_tmp = _mm_unpacklo_epi16(src_2_L, src_2_H); // 7H 7L 6H 6L 3H 3L 2H 2L + __m128i dst_2_tmp = _mm_unpackhi_epi16(src_0_L, src_0_H); // 13H 13L 12H 12L 9H 9L 8H 8L + __m128i dst_3_tmp = _mm_unpackhi_epi16(src_2_L, src_2_H); // 15H 15L 14H 14L 11H 11L 10H 10L + + // Reorder + __m128i dst_0 = _mm_unpacklo_epi64(dst_0_tmp, dst_1_tmp); // 3 2 1 0 + __m128i dst_1 = _mm_unpackhi_epi64(dst_0_tmp, dst_1_tmp); // 7 6 5 4 + __m128i dst_2 = _mm_unpacklo_epi64(dst_2_tmp, dst_3_tmp); // 11 10 9 8 + __m128i dst_3 = _mm_unpackhi_epi64(dst_2_tmp, dst_3_tmp); // 15 14 13 12 + + // store + _mm_stream_si128((__m128i*)dst, dst_0); + _mm_stream_si128(((__m128i*)dst)+1, dst_1); + _mm_stream_si128(((__m128i*)dst)+2, dst_2); + _mm_stream_si128(((__m128i*)dst)+3, dst_3); + + // update the pointer + dst += 64; + src += 2*pitch; + } + // FIXME normally you must use a sfence but it would impact perf to do here + // the function is in a loop and it would have a better place after the loop... +} + +// Template the code to improve reuse of code +template +__forceinline void SwizzleColumn8_sse2_I(u8 *dst, u8 *src, int pitch) +{ + __m128i src_0; + __m128i src_1; + __m128i src_2; + __m128i src_3; + + // load 4 line of 16*8 bits packets + if (aligned) { + src_0 = _mm_load_si128((__m128i*)src); + src_2 = _mm_load_si128((__m128i*)(src+pitch)); + src_1 = _mm_load_si128((__m128i*)(src+2*pitch)); + src_3 = _mm_load_si128((__m128i*)(src+3*pitch)); + } else { + src_0 = _mm_loadu_si128((__m128i*)src); + src_2 = _mm_loadu_si128((__m128i*)(src+pitch)); + src_1 = _mm_loadu_si128((__m128i*)(src+2*pitch)); + src_3 = _mm_loadu_si128((__m128i*)(src+3*pitch)); + } + + // shuffle 2 lines to align pixels + if (INDEX == 0 || INDEX == 2) { + src_1 = _mm_shuffle_epi32(src_1, 0xB1); // 13 12 9 8 5 4 1 0 ... (byte 3 & 1) + src_3 = _mm_shuffle_epi32(src_3, 0xB1); // 15 14 11 10 7 6 3 2 ... (byte 3 & 1) + } else if (INDEX == 1 || INDEX == 3) { + src_0 = _mm_shuffle_epi32(src_0, 0xB1); // 13 12 9 8 5 4 1 0 ... (byte 2 & 0) + src_2 = _mm_shuffle_epi32(src_2, 0xB1); // 15 14 11 10 7 6 3 2 ... (byte 2 & 0) + } else { + assert(0); + } + // src_0 = 13 12 9 8 5 4 1 0 ... (byte 2 & 0) + // src_1 = 13 12 9 8 5 4 1 0 ... (byte 3 & 1) + // src_2 = 15 14 11 10 7 6 3 2 ... (byte 2 & 0) + // src_3 = 15 14 11 10 7 6 3 2 ... 
(byte 3 & 1) + + // Interleave byte 1 & 0 to obtain 16 bits packets + __m128i src_0_L = _mm_unpacklo_epi8(src_0, src_1); // 13L 12L 9L 8L 5L 4L 1L 0L + __m128i src_1_L = _mm_unpacklo_epi8(src_2, src_3); // 15L 14L 11L 10L 7L 6L 3L 2L + // Interleave byte 3 & 2 to obtain 16 bits packets + __m128i src_0_H = _mm_unpackhi_epi8(src_0, src_1); // 13H 12H 9H 8H 5H 4H 1H 0H + __m128i src_1_H = _mm_unpackhi_epi8(src_2, src_3); // 15H 14H 11H 10H 7H 6H 3H 2H + + // Interleave H and L to obtain 32 bits packets + __m128i dst_0_tmp = _mm_unpacklo_epi16(src_0_L, src_0_H); // 5 4 1 0 + __m128i dst_1_tmp = _mm_unpacklo_epi16(src_1_L, src_1_H); // 7 6 3 2 + __m128i dst_2_tmp = _mm_unpackhi_epi16(src_0_L, src_0_H); // 13 12 9 8 + __m128i dst_3_tmp = _mm_unpackhi_epi16(src_1_L, src_1_H); // 15 14 11 10 + + // Reorder the 32 bits packets + __m128i dst_0 = _mm_unpacklo_epi64(dst_0_tmp, dst_1_tmp); // 3 2 1 0 + __m128i dst_1 = _mm_unpackhi_epi64(dst_0_tmp, dst_1_tmp); // 7 6 5 4 + __m128i dst_2 = _mm_unpacklo_epi64(dst_2_tmp, dst_3_tmp); // 11 10 9 8 + __m128i dst_3 = _mm_unpackhi_epi64(dst_2_tmp, dst_3_tmp); // 15 14 13 12 + + // store + _mm_stream_si128((__m128i*)dst, dst_0); + _mm_stream_si128(((__m128i*)dst)+1, dst_1); + _mm_stream_si128(((__m128i*)dst)+2, dst_2); + _mm_stream_si128(((__m128i*)dst)+3, dst_3); +} + +template +__forceinline void SwizzleBlock8_sse2_I(u8 *dst, u8 *src, int pitch) +{ + SwizzleColumn8_sse2_I(dst, src, pitch); + + dst += 64; + src += 4*pitch; + SwizzleColumn8_sse2_I(dst, src, pitch); + + dst += 64; + src += 4*pitch; + SwizzleColumn8_sse2_I(dst, src, pitch); + + dst += 64; + src += 4*pitch; + SwizzleColumn8_sse2_I(dst, src, pitch); + + // FIXME normally you must use a sfence but it would impact perf to do here + // the function is in a loop and it would have a better place after the loop... +} + +// Template the code to improve reuse of code +template +__forceinline void SwizzleColumn4_sse2_I(u8 *dst, u8 *src, int pitch) +{ + __m128i src_0; + __m128i src_1; + __m128i src_2; + __m128i src_3; + + // Build a mask (tranform a u32 to a 4 packets u32) + const u32 mask_template = 0x0f0f0f0f; + __m128i mask = _mm_cvtsi32_si128(mask_template); + mask = _mm_shuffle_epi32(mask, 0); + + // load 4 line of 32*4 bits packets + if (aligned) { + src_0 = _mm_load_si128((__m128i*)src); + src_2 = _mm_load_si128((__m128i*)(src+pitch)); + src_1 = _mm_load_si128((__m128i*)(src+2*pitch)); + src_3 = _mm_load_si128((__m128i*)(src+3*pitch)); + } else { + src_0 = _mm_loadu_si128((__m128i*)src); + src_2 = _mm_loadu_si128((__m128i*)(src+pitch)); + src_1 = _mm_loadu_si128((__m128i*)(src+2*pitch)); + src_3 = _mm_loadu_si128((__m128i*)(src+3*pitch)); + } + + // shuffle 2 lines to align pixels + if (INDEX == 0 || INDEX == 2) { + src_1 = _mm_shufflelo_epi16(src_1, 0xB1); + src_1 = _mm_shufflehi_epi16(src_1, 0xB1); // 13 12 9 8 5 4 1 0 ... (Half-byte 7 & 5 & 3 & 1) + src_3 = _mm_shufflelo_epi16(src_3, 0xB1); + src_3 = _mm_shufflehi_epi16(src_3, 0xB1); // 15 14 11 10 7 6 3 2 ... (Half-byte 7 & 5 & 3 & 1) + } else if (INDEX == 1 || INDEX == 3) { + src_0 = _mm_shufflelo_epi16(src_0, 0xB1); + src_0 = _mm_shufflehi_epi16(src_0, 0xB1); // 13 12 9 8 5 4 1 0 ... (Half-byte 6 & 4 & 2 & 0) + src_2 = _mm_shufflelo_epi16(src_2, 0xB1); + src_2 = _mm_shufflehi_epi16(src_2, 0xB1); // 15 14 11 10 7 6 3 2 ... (Half-byte 6 & 4 & 2 & 0) + } else { + assert(0); + } + // src_0 = 13 12 9 8 5 4 1 0 ... (Half-byte 6 & 4 & 2 & 0) + // src_1 = 13 12 9 8 5 4 1 0 ... (Half-byte 7 & 5 & 3 & 1) + // src_2 = 15 14 11 10 7 6 3 2 ... 
(Half-byte 6 & 4 & 2 & 0) + // src_3 = 15 14 11 10 7 6 3 2 ... (Half-byte 7 & 5 & 3 & 1) + + // ** Interleave Half-byte to obtain 8 bits packets + // Shift value to ease 4 bits filter. + // Note use a packet shift to allow a 4bits shifts + __m128i src_0_shift = _mm_srli_epi64(src_0, 4); // ? 13 12 9 8 5 4 1 ... (Half-byte 6 & 4 & 2 & 0) + __m128i src_1_shift = _mm_slli_epi64(src_1, 4); // 12 9 8 5 4 1 0 ? ... (Half-byte 7 & 5 & 3 & 1) + __m128i src_2_shift = _mm_srli_epi64(src_2, 4); // ? 15 14 11 10 7 6 3 ... (Half-byte 6 & 4 & 2 & 0) + __m128i src_3_shift = _mm_slli_epi64(src_3, 4); // 14 11 10 7 6 3 2 ? ... (Half-byte 7 & 5 & 3 & 1) + + // 12 - 8 - 4 - 0 - (HB odd) || - 12 - 8 - 4 - 0 (HB even) => 12 8 4 0 (byte 3 & 2 & 1 & 0) + src_0 = _mm_or_si128(_mm_andnot_si128(mask, src_1_shift), _mm_and_si128(mask, src_0)); + // - 13 - 9 - 5 - 1 (HB even) || 13 - 9 - 5 - 1 - (HB odd) => 13 9 5 1 (byte 3 & 2 & 1 & 0) + src_1 = _mm_or_si128(_mm_and_si128(mask, src_0_shift), _mm_andnot_si128(mask, src_1)); + + // 14 - 10 - 6 - 2 - (HB odd) || - 14 - 10 - 6 - 2 (HB even) => 14 10 6 2 (byte 3 & 2 & 1 & 0) + src_2 = _mm_or_si128(_mm_andnot_si128(mask, src_3_shift), _mm_and_si128(mask, src_2)); + // - 15 - 11 - 7 - 3 (HB even) || 15 - 11 - 7 - 3 - (HB odd) => 15 11 7 3 (byte 3 & 2 & 1 & 0) + src_3 = _mm_or_si128(_mm_and_si128(mask, src_2_shift), _mm_andnot_si128(mask, src_3)); + + + // reorder the 8 bits packets + __m128i src_0_tmp = _mm_unpacklo_epi8(src_0, src_1); // 13 12 9 8 5 4 1 0 (byte 1 & 0) + __m128i src_1_tmp = _mm_unpackhi_epi8(src_0, src_1); // 13 12 9 8 5 4 1 0 (byte 3 & 2) + __m128i src_2_tmp = _mm_unpacklo_epi8(src_2, src_3); // 15 14 11 10 7 6 3 2 (byte 1 & 0) + __m128i src_3_tmp = _mm_unpackhi_epi8(src_2, src_3); // 15 14 11 10 7 6 3 2 (byte 3 & 2) + + // interleave byte to obtain 32 bits packets + __m128i src_0_L = _mm_unpacklo_epi8(src_0_tmp, src_1_tmp); // 2.13 0.13 2.12 0.12 2.9 0.9 2.8 0.8 2.5 0.5 2.4 0.4 2.1 0.1 2.0 0.0 + __m128i src_0_H = _mm_unpackhi_epi8(src_0_tmp, src_1_tmp); // 3.13 1.13 3.12 1.12 3.9 1.9 3.8 1.8 3.5 1.5 3.4 1.4 3.1 1.1 3.0 1.0 + __m128i src_1_L = _mm_unpacklo_epi8(src_2_tmp, src_3_tmp); // 2.15 0.15 2.14 0.14 2.11 0.11 2.10 0.10 2.7 0.7 2.6 0.6 2.3 0.3 2.2 0.2 + __m128i src_1_H = _mm_unpackhi_epi8(src_2_tmp, src_3_tmp); // 3.15 1.15 3.14 1.14 3.11 1.11 3.10 1.10 3.7 1.7 3.6 1.6 3.3 1.3 3.2 1.2 + + __m128i dst_0_tmp = _mm_unpacklo_epi8(src_0_L, src_0_H); // 5 4 1 0 + __m128i dst_1_tmp = _mm_unpacklo_epi8(src_1_L, src_1_H); // 7 6 3 2 + __m128i dst_2_tmp = _mm_unpackhi_epi8(src_0_L, src_0_H); // 13 12 9 8 + __m128i dst_3_tmp = _mm_unpackhi_epi8(src_1_L, src_1_H); // 15 14 11 10 + + // Reorder the 32 bits packets + __m128i dst_0 = _mm_unpacklo_epi64(dst_0_tmp, dst_1_tmp); // 3 2 1 0 + __m128i dst_1 = _mm_unpackhi_epi64(dst_0_tmp, dst_1_tmp); // 7 6 5 4 + __m128i dst_2 = _mm_unpacklo_epi64(dst_2_tmp, dst_3_tmp); // 11 10 9 8 + __m128i dst_3 = _mm_unpackhi_epi64(dst_2_tmp, dst_3_tmp); // 15 14 13 12 + + // store + _mm_stream_si128((__m128i*)dst, dst_0); + _mm_stream_si128(((__m128i*)dst)+1, dst_1); + _mm_stream_si128(((__m128i*)dst)+2, dst_2); + _mm_stream_si128(((__m128i*)dst)+3, dst_3); +} + +template +__forceinline void SwizzleBlock4_sse2_I(u8 *dst, u8 *src, int pitch) +{ + SwizzleColumn4_sse2_I(dst, src, pitch); + + dst += 64; + src += 4*pitch; + SwizzleColumn4_sse2_I(dst, src, pitch); + + dst += 64; + src += 4*pitch; + SwizzleColumn4_sse2_I(dst, src, pitch); + + dst += 64; + src += 4*pitch; + SwizzleColumn4_sse2_I(dst, src, pitch); + + // FIXME normally you 
must use a sfence but it would impact perf to do here + // the function is in a loop and it would have a better place after the loop... +} +#endif // special swizzle macros - which I converted to functions. #ifdef ZEROGS_SSE2 __forceinline void SwizzleBlock32(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_32 + SwizzleBlock32_sse2_I(dst, src, pitch, WriteMask); +#else SwizzleBlock32_sse2(dst, src, pitch, WriteMask); +#endif } __forceinline void SwizzleBlock16(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_16 + SwizzleBlock16_sse2_I(dst, src, pitch/*, WriteMask*/); +#else SwizzleBlock16_sse2(dst, src, pitch/*, WriteMask*/); +#endif } __forceinline void SwizzleBlock8(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_8 + SwizzleBlock8_sse2_I(dst, src, pitch/*, WriteMask*/); +#else SwizzleBlock8_sse2(dst, src, pitch/*, WriteMask*/); +#endif } __forceinline void SwizzleBlock4(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_4 + SwizzleBlock4_sse2_I(dst, src, pitch/*, WriteMask*/); +#else SwizzleBlock4_sse2(dst, src, pitch/*, WriteMask*/); +#endif } __forceinline void SwizzleBlock32u(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_32 + SwizzleBlock32_sse2_I(dst, src, pitch, WriteMask); +#else SwizzleBlock32u_sse2(dst, src, pitch, WriteMask); +#endif } __forceinline void SwizzleBlock16u(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_16 + SwizzleBlock16_sse2_I(dst, src, pitch/*, WriteMask*/); +#else SwizzleBlock16u_sse2(dst, src, pitch/*, WriteMask*/); +#endif } __forceinline void SwizzleBlock8u(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_8 + SwizzleBlock8_sse2_I(dst, src, pitch/*, WriteMask*/); +#else SwizzleBlock8u_sse2(dst, src, pitch/*, WriteMask*/); +#endif } __forceinline void SwizzleBlock4u(u8 *dst, u8 *src, int pitch, u32 WriteMask) { +#ifdef INTRINSIC_PORT_4 + SwizzleBlock4_sse2_I(dst, src, pitch/*, WriteMask*/); +#else SwizzleBlock4u_sse2(dst, src, pitch/*, WriteMask*/); +#endif } #else @@ -270,3 +659,4 @@ __forceinline void SwizzleBlock4HL(u8 *dst, u8 *src, int pitch, u32 WriteMask) SwizzleBlock32((u8*)dst, (u8*)tempblock, 32, 0x0f000000); } + diff --git a/plugins/zzogl-pg/opengl/targets.cpp b/plugins/zzogl-pg/opengl/targets.cpp index eece9df090..1656637a5f 100644 --- a/plugins/zzogl-pg/opengl/targets.cpp +++ b/plugins/zzogl-pg/opengl/targets.cpp @@ -3140,10 +3140,10 @@ __forceinline void update_8pixels_sse2(u32* src, u32* basepage, u32 i_msk, u32 j } // Merge the 2 dword - pixels_0 = _mm_unpacklo_epi64(pixel_0_low, pixel_0_high); + pixels_0 = _mm_unpacklo_epi64(pixel_0_low, pixel_0_high); if (PSMT_ISHALF(psm)) pixels_1 = _mm_unpacklo_epi64(pixel_1_low, pixel_1_high); - // transform pixel from ARGB:8888 to ARGB:1555 + // transform pixel from ARGB:8888 to ARGB:1555 if (psm == PSMCT16 || psm == PSMCT16S) { // shift pixel instead of the mask. It allow to keep 1 mask into a register // instead of 4 (not enough room on x86...). @@ -3214,26 +3214,26 @@ __forceinline void update_8pixels_sse2(u32* src, u32* basepage, u32 i_msk, u32 j // Save some memory access when pix_mask is 0. 
if (pix_mask) { - // Build fbm mask (tranform a u32 to a 4 packets u32) - // In 16 bits texture one packet is "0000 DATA" - __m128i imask = _mm_cvtsi32_si128(pix_mask); - imask = _mm_shuffle_epi32(imask, 0); + // Build fbm mask (tranform a u32 to a 4 packets u32) + // In 16 bits texture one packet is "0000 DATA" + __m128i imask = _mm_cvtsi32_si128(pix_mask); + imask = _mm_shuffle_epi32(imask, 0); - // apply the mask on new values - pixels_0 = _mm_andnot_si128(imask, pixels_0); + // apply the mask on new values + pixels_0 = _mm_andnot_si128(imask, pixels_0); - __m128i old_pixels_0; - __m128i final_pixels_0; + __m128i old_pixels_0; + __m128i final_pixels_0; old_pixels_0 = _mm_and_si128(imask, _mm_load_si128((__m128i*)dst_add)); final_pixels_0 = _mm_or_si128(old_pixels_0, pixels_0); _mm_store_si128((__m128i*)dst_add, final_pixels_0); - } else { + } else { // Note: because we did not read the previous value of add. We could bypass the cache. // We gains a few percents _mm_stream_si128((__m128i*)dst_add, pixels_0); - } + } } @@ -3362,13 +3362,13 @@ void Resolve_32_Bit_sse2(const void* psrc, int fbp, int fbw, int fbh, u32 fbm) // Note update_8pixels process 2 lines at onces hence the factor 2 src -= 2*raw_size; - } + } if(!pix_mask) { // Ensure that previous (out of order) write are done. It must be done after non temporal instruction // (or *_stream_* intrinsic) _mm_sfence(); - } + } #ifdef LOG_RESOLVE_PROFILE #ifdef __LINUX__
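
A note on the WriteMask handling in the new code: SwizzleBlock32_sse2_I broadcasts the 32-bit WriteMask to all four dword lanes with _mm_cvtsi32_si128 followed by _mm_shuffle_epi32(mask, 0), then blends the new data with the previous destination contents as dst = (new & mask) | (old & ~mask); the fbm path of update_8pixels_sse2 uses the same broadcast with the mask polarity inverted (bits set in fbm are preserved). The sketch below only illustrates that blend pattern; masked_store, its name, and the unaligned loads/stores are hypothetical and not part of the patch.

    #include <emmintrin.h>   // SSE2 intrinsics
    #include <cstdint>

    // Illustrative only: write 4 dword pixels through a per-bit write mask.
    // Bits set in write_mask take the new value from src; cleared bits keep dst.
    static inline void masked_store(uint32_t* dst, const uint32_t* src, uint32_t write_mask)
    {
        // Broadcast the 32-bit mask to all four dword lanes.
        __m128i mask = _mm_cvtsi32_si128((int)write_mask);
        mask = _mm_shuffle_epi32(mask, 0);

        __m128i new_px = _mm_loadu_si128((const __m128i*)src);
        __m128i old_px = _mm_loadu_si128((const __m128i*)dst);

        // dst = (new & mask) | (old & ~mask); _mm_andnot_si128(a, b) computes (~a) & b.
        __m128i out = _mm_or_si128(_mm_and_si128(mask, new_px),
                                   _mm_andnot_si128(mask, old_px));
        _mm_storeu_si128((__m128i*)dst, out);
    }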
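
The FIXME comments in the swizzle functions and the !pix_mask branch of Resolve_32_Bit_sse2 point at the same rule: _mm_stream_si128 issues weakly ordered non-temporal stores, so a single _mm_sfence() after the whole loop is sufficient and cheaper than fencing inside every block. Below is a minimal sketch of that caller-side pattern; copy_block_nt/copy_blocks_nt, the 64-byte block size, and the unaligned source loads are illustrative assumptions, not code from the patch, and dst is assumed 16-byte aligned as _mm_stream_si128 requires.

    #include <emmintrin.h>   // SSE2: _mm_stream_si128; _mm_sfence comes via xmmintrin.h
    #include <cstdint>

    // Hypothetical stand-in for one SwizzleBlock*_sse2_I call: copy one
    // 64-byte block with non-temporal stores. dst must be 16-byte aligned.
    static inline void copy_block_nt(uint8_t* dst, const uint8_t* src)
    {
        __m128i a = _mm_loadu_si128((const __m128i*)(src));
        __m128i b = _mm_loadu_si128((const __m128i*)(src + 16));
        __m128i c = _mm_loadu_si128((const __m128i*)(src + 32));
        __m128i d = _mm_loadu_si128((const __m128i*)(src + 48));
        _mm_stream_si128((__m128i*)dst,     a);
        _mm_stream_si128((__m128i*)dst + 1, b);
        _mm_stream_si128((__m128i*)dst + 2, c);
        _mm_stream_si128((__m128i*)dst + 3, d);
    }

    void copy_blocks_nt(uint8_t* dst, const uint8_t* src, int blocks)
    {
        for (int i = 0; i < blocks; ++i)
            copy_block_nt(dst + 64 * i, src + 64 * i);

        // A single fence after the loop orders all the streaming stores above,
        // mirroring the !pix_mask path of Resolve_32_Bit_sse2 in targets.cpp.
        _mm_sfence();
    }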