GX_TF_I4 texture decoder optimized with SSE2, producing a ~76% speed increase over the reference C implementation.

GX_TF_RGBA8 texture decoder optimized with SSE2, producing a ~68% speed increase over the reference C implementation.
TABified the entire document per NeoBrainX. :)

git-svn-id: https://dolphin-emu.googlecode.com/svn/trunk@6706 8ced0084-cf51-0410-be5f-012b33b47a6e
Author: james.jdunne
Date: 2011-01-01 03:52:32 +00:00
Commit: 60082853ec
Parent: 99c8ea7abb


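For context: GX_TF_I4 stores two 4-bit intensity texels per byte in 8x8 tiles, and decoding widens each nibble n to the 8-bit value (n << 4) | n, replicated across R, G, B and A. A minimal scalar sketch of that per-byte step, with names and fixed-width types assumed from Dolphin's TextureDecoder rather than taken from this commit:

// Hypothetical sketch, not the committed code: decode one I4 byte into two
// RGBA8 texels, mirroring the reference C path's 4-to-8-bit widening + memset.
static inline void DecodeI4PairSketch(u32 *dst, u8 b)
{
	const u8 hi = b >> 4, lo = b & 0xF;
	const u8 i1 = (u8)((hi << 4) | hi); // widen the high nibble
	const u8 i2 = (u8)((lo << 4) | lo); // widen the low nibble
	dst[0] = i1 * 0x01010101u; // intensity replicated into R, G, B, A
	dst[1] = i2 * 0x01010101u;
}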
@@ -747,15 +747,12 @@ PC_TexFormat TexDecoder_Decode_real(u8 *dst, const u8 *src, int width, int heigh
for (int iy = 0; iy < 4; iy++, src += 8)
decodebytesC8_To_Raw16_SSSE3((u16*)dst + (y + iy) * width + x, src, tlutaddr);
} else
#endif
{
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 8)
for (int iy = 0; iy < 4; iy++, src += 8)
decodebytesC8_To_Raw16((u16*)dst + (y + iy) * width + x, src, tlutaddr);
}
}
return GetPCFormatFromTLUTFormat(tlutfmt);
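Both branches in the hunk above push eight C8 (8-bit color-indexed) texels per row through the TLUT. A plausible sketch of one scalar row decode; the real decodebytesC8_To_Raw16 lives in TextureDecoder.cpp and likely also byteswaps the big-endian TLUT entries:

// Hypothetical sketch, not Dolphin's actual helper: look up 8 one-byte
// palette indices in a 16-bit-per-entry TLUT.
static inline void DecodeC8RowSketch(u16 *dst, const u8 *src, const u16 *tlut)
{
	for (int ix = 0; ix < 8; ix++)
		dst[ix] = tlut[src[ix]];
}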
@@ -957,6 +954,98 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
break;
case GX_TF_I4:
{
// JSD optimized with SSE2 intrinsics.
// Produces a ~76% speed increase over reference C implementation.
const __m128i kMask_x0f = _mm_set_epi32(0x0f0f0f0fL, 0x0f0f0f0fL, 0x0f0f0f0fL, 0x0f0f0f0fL);
const __m128i kMask_xf0 = _mm_set_epi32(0xf0f0f0f0L, 0xf0f0f0f0L, 0xf0f0f0f0L, 0xf0f0f0f0L);
const __m128i kMask_x00000000ffffffff = _mm_set_epi32(0x00000000L, 0xffffffffL, 0x00000000L, 0xffffffffL);
const __m128i kMask_xffffffff00000000 = _mm_set_epi32(0xffffffffL, 0x00000000L, 0xffffffffL, 0x00000000L);
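// (The first two masks select the low and high nibble of every byte; the last
// two keep alternating 32-bit words so that the finished high-nibble and
// low-nibble texel streams can be interleaved, high nibble first, at the end.)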
for (int y = 0; y < height; y += 8)
for (int x = 0; x < width; x += 8)
for (int iy = 0; iy < 8; iy += 2, src += 8)
{
// Expand [BA] to [BB][BB][BB][BB] [AA][AA][AA][AA], where [BA] is a single byte and A and B are 4-bit values.
// Load 64 bits from `src` into an __m128i with upper 64 bits zeroed: (0000 0000 hgfe dcba)
// dcba is row #0 and hgfe is row #1. We process two rows at once with each loop iteration, hence iy += 2.
const __m128i r0 = _mm_loadl_epi64((const __m128i *)src);
// Shuffle low 64-bits with itself to expand from (0000 0000 hgfe dcba) to (hhgg ffee ddcc bbaa)
const __m128i r1 = _mm_unpacklo_epi8(r0, r0);
// We want the hi 4 bits of each 8-bit word replicated to 32-bit words:
// (HhHhGgGg FfFfEeEe DdDdCcCc BbBbAaAa) >> 4 [16] -> (0HhH0GgG 0FfF0EeE 0DdD0CcC 0BbB0AaA)
const __m128i i1 = _mm_srli_epi16(r1, 4);
// (0HhH0GgG 0FfF0EeE 0DdD0CcC 0BbB0AaA) & kMask_x0f -> (0H0H0G0G 0F0F0E0E 0D0D0C0C 0B0B0A0A)
const __m128i i12 = _mm_and_si128(i1, kMask_x0f);
// (HhHhGgGg FfFfEeEe DdDdCcCc BbBbAaAa) & kMask_xf0 -> (H0H0G0G0 F0F0E0E0 D0D0C0C0 B0B0A0A0)
const __m128i i13 = _mm_and_si128(r1, kMask_xf0);
// (0H0H0G0G 0F0F0E0E 0D0D0C0C 0B0B0A0A) | (H0H0G0G0 F0F0E0E0 D0D0C0C0 B0B0A0A0) -> (HHHHGGGG FFFFEEEE DDDDCCCC BBBBAAAA)
const __m128i i14 = _mm_or_si128(i12, i13);
// Shuffle low 64-bits with itself to expand from (HHHHGGGG FFFFEEEE DDDDCCCC BBBBAAAA) to (DDDDDDDD CCCCCCCC BBBBBBBB AAAAAAAA)
const __m128i i15 = _mm_unpacklo_epi8(i14, i14);
// (DDDDDDDD CCCCCCCC BBBBBBBB AAAAAAAA) -> (BBBBBBBB BBBBBBBB AAAAAAAA AAAAAAAA)
const __m128i i151 = _mm_unpacklo_epi8(i15, i15);
// (DDDDDDDD CCCCCCCC BBBBBBBB AAAAAAAA) -> (DDDDDDDD DDDDDDDD CCCCCCCC CCCCCCCC)
const __m128i i152 = _mm_unpackhi_epi8(i15, i15);
// Shuffle hi 64-bits with itself to expand from (HHHHGGGG FFFFEEEE DDDDCCCC BBBBAAAA) to (HHHHHHHH GGGGGGGG FFFFFFFF EEEEEEEE)
const __m128i i16 = _mm_unpackhi_epi8(i14, i14);
// (HHHHHHHH GGGGGGGG FFFFFFFF EEEEEEEE) -> (FFFFFFFF FFFFFFFF EEEEEEEE EEEEEEEE)
const __m128i i161 = _mm_unpacklo_epi8(i16, i16);
// (HHHHHHHH GGGGGGGG FFFFFFFF EEEEEEEE) -> (HHHHHHHH HHHHHHHH GGGGGGGG GGGGGGGG)
const __m128i i162 = _mm_unpackhi_epi8(i16, i16);
// Now find the lo 4 bits of each input 8-bit word:
// (HhHhGgGg FfFfEeEe DdDdCcCc BbBbAaAa) & kMask_x0f -> (0h0h0g0g 0f0f0e0e 0d0d0c0c 0b0b0a0a)
const __m128i i2 = _mm_and_si128(r1, kMask_x0f);
// (HhHhGgGg FfFfEeEe DdDdCcCc BbBbAaAa) << 4 [16] -> (hHh0gGg0 fFf0eEe0 dDd0cCc0 bBb0aAa0)
const __m128i i21 = _mm_slli_epi16(r1, 4);
// (hHh0gGg0 fFf0eEe0 dDd0cCc0 bBb0aAa0) & kMask_xf0 -> (h0h0g0g0 f0f0e0e0 d0d0c0c0 b0b0a0a0)
const __m128i i22 = _mm_and_si128(i21, kMask_xf0);
// (0h0h0g0g 0f0f0e0e 0d0d0c0c 0b0b0a0a) | (h0h0g0g0 f0f0e0e0 d0d0c0c0 b0b0a0a0) -> (hhhhgggg ffffeeee ddddcccc bbbbaaaa)
const __m128i i23 = _mm_or_si128(i2, i22);
// Shuffle low 64-bits with itself to expand from (hhhhgggg ffffeeee ddddcccc bbbbaaaa) to (dddddddd cccccccc bbbbbbbb aaaaaaaa)
const __m128i i25 = _mm_unpacklo_epi8(i23, i23);
// (dddddddd cccccccc bbbbbbbb aaaaaaaa) -> (bbbbbbbb bbbbbbbb aaaaaaaa aaaaaaaa)
const __m128i i251 = _mm_unpacklo_epi8(i25, i25);
// (dddddddd cccccccc bbbbbbbb aaaaaaaa) -> (dddddddd dddddddd cccccccc cccccccc)
const __m128i i252 = _mm_unpackhi_epi8(i25, i25);
// Shuffle hi 64-bits with itself to expand from (hhhhgggg ffffeeee ddddcccc bbbbaaaa) to (hhhhhhhh gggggggg ffffffff eeeeeeee)
const __m128i i26 = _mm_unpackhi_epi8(i23, i23);
// (hhhhhhhh gggggggg ffffffff eeeeeeee) -> (ffffffff ffffffff eeeeeeee eeeeeeee)
const __m128i i261 = _mm_unpacklo_epi8(i26, i26);
// (hhhhhhhh gggggggg ffffffff eeeeeeee) -> (hhhhhhhh hhhhhhhh gggggggg gggggggg)
const __m128i i262 = _mm_unpackhi_epi8(i26, i26);
// Now create the final output m128is to write to memory:
// _mm_and_si128(i151, kMask_x00000000ffffffff) takes i151 and masks off 1st and 3rd 32-bit words
// (BBBBBBBB BBBBBBBB AAAAAAAA AAAAAAAA) -> (00000000 BBBBBBBB 00000000 AAAAAAAA)
// _mm_and_si128(i251, kMask_xffffffff00000000) takes i251 and masks off 2nd and 4th 32-bit words
// (bbbbbbbb bbbbbbbb aaaaaaaa aaaaaaaa) -> (bbbbbbbb 00000000 aaaaaaaa 00000000)
// And last but not least, _mm_or_si128 ORs those two together, giving us the interleaving we desire:
// (00000000 BBBBBBBB 00000000 AAAAAAAA) | (bbbbbbbb 00000000 aaaaaaaa 00000000) -> (bbbbbbbb BBBBBBBB aaaaaaaa AAAAAAAA)
const __m128i o1 = _mm_or_si128(_mm_and_si128(i151, kMask_x00000000ffffffff), _mm_and_si128(i251, kMask_xffffffff00000000));
const __m128i o2 = _mm_or_si128(_mm_and_si128(i152, kMask_x00000000ffffffff), _mm_and_si128(i252, kMask_xffffffff00000000));
// These two are for the next row; same pattern as above. We batched up two rows because our input was 64 bits.
const __m128i o3 = _mm_or_si128(_mm_and_si128(i161, kMask_x00000000ffffffff), _mm_and_si128(i261, kMask_xffffffff00000000));
const __m128i o4 = _mm_or_si128(_mm_and_si128(i162, kMask_x00000000ffffffff), _mm_and_si128(i262, kMask_xffffffff00000000));
// Write row 0:
_mm_store_si128( (__m128i*)( dst+(y + iy) * width + x ), o1 );
_mm_store_si128( (__m128i*)( dst+(y + iy) * width + x + 4 ), o2 );
// Write row 1:
_mm_store_si128( (__m128i*)( dst+(y + iy+1) * width + x ), o3 );
_mm_store_si128( (__m128i*)( dst+(y + iy+1) * width + x + 4 ), o4 );
}
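// (Scalar equivalence of the chain above: for each input byte b it computes
// (n << 4) | n for n = b >> 4 and for n = b & 0xF, the same 4-to-8-bit
// widening the reference C path performs, for 16 texels at a time.)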
#if 0
// Reference C implementation:
for (int y = 0; y < height; y += 8)
for (int x = 0; x < width; x += 8)
for (int iy = 0; iy < 8; iy++, src += 4)
@@ -968,15 +1057,22 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
memset(dst+(y + iy) * width + x + ix * 2, i1, 4);
memset(dst+(y + iy) * width + x + ix * 2 + 1, i2, 4);
}
#endif
}
break;
case GX_TF_I8: // speed critical
{
-#if _M_SSE >= 0x301
-// JSD: It doesn't get any faster than this, folks.
+// JSD optimized with SSE2 intrinsics.
+// Produces an ~86% speed increase over reference C implementation.
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 8)
{
// Each loop iteration processes 4 rows from 4 64-bit reads.
// TODO: is it more efficient to group the loads together sequentially and also the stores at the end?
// _mm_stream instead of _mm_store on my AMD Phenom II x410 made performance significantly WORSE, so I
// went with _mm_stores. Perhaps there is some edge case here creating the terrible performance or we're
// not aligned to 16-byte boundaries. I don't know.
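// (A possible explanation: _mm_stream_si128 is a non-temporal store that
// bypasses the cache, so it only pays off when the destination is not read
// again soon; decoded texture data is typically consumed right away. It also
// assumes the same 16-byte alignment as _mm_store_si128.)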
__m128i *quaddst;
// Load 64 bits from `src` into an __m128i with upper 64 bits zeroed: (0000 0000 hgfe dcba)
@@ -1048,7 +1144,8 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
src += 8;
}
-#else
+#if 0
// Reference C implementation
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 8)
for (int iy = 0; iy < 4; ++iy, src += 8)
@@ -1106,7 +1203,8 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
break;
case GX_TF_IA8:
{
-#if _M_SSE >= 0x301
+// JSD optimized with SSE2 intrinsics.
// Produces an ~80% speed improvement over reference C implementation.
const __m128i kMask_xf0 = _mm_set_epi32(0x00000000L, 0x00000000L, 0xff00ff00L, 0xff00ff00L);
const __m128i kMask_x0f = _mm_set_epi32(0x00000000L, 0x00000000L, 0x00ff00ffL, 0x00ff00ffL);
const __m128i kMask_xf000 = _mm_set_epi32(0xff000000L, 0xff000000L, 0xff000000L, 0xff000000L);
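// (IA8 texels are one intensity byte paired with one alpha byte; the decode
// replicates I into R, G and B and moves A into the top byte of each output
// word, which is the position kMask_xf000 selects.)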
@@ -1154,7 +1252,8 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
// write out the 128-bit result:
_mm_store_si128( (__m128i*)(dst + (y + iy) * width + x), r1 );
}
-#else
+#if 0
// Reference C implementation:
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 4)
for (int iy = 0; iy < 4; iy++, src += 8)
@@ -1208,9 +1307,8 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
break;
case GX_TF_RGB5A3:
{
-#if _M_SSE >= 0x301
-// These constants are used to apply the (x & mask) operation after x has been right-shifted
-// out of its place.
+// JSD optimized with SSE2 intrinsics in 2 out of 4 cases.
+// Produces a ~25% speed improvement over reference C implementation.
const __m128i kMask_x1f = _mm_set_epi32(0x0000001fL, 0x0000001fL, 0x0000001fL, 0x0000001fL);
const __m128i kMask_x0f = _mm_set_epi32(0x0000000fL, 0x0000000fL, 0x0000000fL, 0x0000000fL);
const __m128i kMask_x07 = _mm_set_epi32(0x00000007L, 0x00000007L, 0x00000007L, 0x00000007L);
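// (These masks correspond to RGB5A3's two encodings: a 16-bit texel with its
// top bit set is RGB555, three 5-bit fields selected via kMask_x1f and fully
// opaque; with the top bit clear it is 4-bit R, G, B (kMask_x0f) plus a
// 3-bit alpha (kMask_x07).)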
@@ -1415,7 +1513,8 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
newdst[3] = r3 | (g3 << 8) | (b3 << 16) | (a3 << 24);
}
}
-#else
+#if 0
// Reference C implementation:
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 4)
for (int iy = 0; iy < 4; iy++, src += 8)
@@ -1425,6 +1524,105 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
break;
case GX_TF_RGBA8: // speed critical
{
// JSD optimized with SSE2 intrinsics.
// Produces a ~68% improvement in speed over reference C implementation.
const __m128i kMask_x000f = _mm_set_epi32(0x000000FFL, 0x000000FFL, 0x000000FFL, 0x000000FFL);
const __m128i kMask_xf000 = _mm_set_epi32(0xFF000000L, 0xFF000000L, 0xFF000000L, 0xFF000000L);
const __m128i kMask_x0ff0 = _mm_set_epi32(0x00FFFF00L, 0x00FFFF00L, 0x00FFFF00L, 0x00FFFF00L);
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 4, src += 64)
{
// Input is divided into 16-bit words. The texels are split into AR and GB components: all
// AR components come first, grouped in 32 bytes, followed by the GB components in the next 32 bytes.
// We process 16 texels (one 4x4 tile) per loop iteration, numbered 0-f.
//
// Convention is:
// one byte is [component-name texel-number]
// __m128i is (4-bytes 4-bytes 4-bytes 4-bytes)
//
// Input is ([A 7][R 7][A 6][R 6] [A 5][R 5][A 4][R 4] [A 3][R 3][A 2][R 2] [A 1][R 1][A 0][R 0])
// ([A f][R f][A e][R e] [A d][R d][A c][R c] [A b][R b][A a][R a] [A 9][R 9][A 8][R 8])
// ([G 7][B 7][G 6][B 6] [G 5][B 5][G 4][B 4] [G 3][B 3][G 2][B 2] [G 1][B 1][G 0][B 0])
// ([G f][B f][G e][B e] [G d][B d][G c][B c] [G b][B b][G a][B a] [G 9][B 9][G 8][B 8])
//
// Output is (RGBA3 RGBA2 RGBA1 RGBA0)
// (RGBA7 RGBA6 RGBA5 RGBA4)
// (RGBAb RGBAa RGBA9 RGBA8)
// (RGBAf RGBAe RGBAd RGBAc)
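// (In scalar terms, output texel t combines A and R from 16-bit word t of the
// AR half with G and B from 16-bit word t of the GB half, the same pairing
// decodebytesARGB8_4ToRgba performs in the reference path below.)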
// Loads the 1st half of AR components ([A 7][R 7][A 6][R 6] [A 5][R 5][A 4][R 4] [A 3][R 3][A 2][R 2] [A 1][R 1][A 0][R 0])
const __m128i ar0 = _mm_load_si128((__m128i*)src);
// Loads the 2nd half of AR components ([A f][R f][A e][R e] [A d][R d][A c][R c] [A b][R b][A a][R a] [A 9][R 9][A 8][R 8])
const __m128i ar1 = _mm_load_si128((__m128i*)src+1);
// Loads the 1st half of GB components ([G 7][B 7][G 6][B 6] [G 5][B 5][G 4][B 4] [G 3][B 3][G 2][B 2] [G 1][B 1][G 0][B 0])
const __m128i gb0 = _mm_load_si128((__m128i*)src+2);
// Loads the 2nd half of GB components ([G f][B f][G e][B e] [G d][B d][G c][B c] [G b][B b][G a][B a] [G 9][B 9][G 8][B 8])
const __m128i gb1 = _mm_load_si128((__m128i*)src+3);
// Expand the AR components to fill out 32-bit words:
// ([A 7][R 7][A 6][R 6] [A 5][R 5][A 4][R 4] [A 3][R 3][A 2][R 2] [A 1][R 1][A 0][R 0]) -> ([A 3][A 3][R 3][R 3] [A 2][A 2][R 2][R 2] [A 1][A 1][R 1][R 1] [A 0][A 0][R 0][R 0])
const __m128i aarr00 = _mm_unpacklo_epi8(ar0, ar0);
// ([A 7][R 7][A 6][R 6] [A 5][R 5][A 4][R 4] [A 3][R 3][A 2][R 2] [A 1][R 1][A 0][R 0]) -> ([A 7][A 7][R 7][R 7] [A 6][A 6][R 6][R 6] [A 5][A 5][R 5][R 5] [A 4][A 4][R 4][R 4])
const __m128i aarr01 = _mm_unpackhi_epi8(ar0, ar0);
// ([A f][R f][A e][R e] [A d][R d][A c][R c] [A b][R b][A a][R a] [A 9][R 9][A 8][R 8]) -> ([A b][A b][R b][R b] [A a][A a][R a][R a] [A 9][A 9][R 9][R 9] [A 8][A 8][R 8][R 8])
const __m128i aarr10 = _mm_unpacklo_epi8(ar1, ar1);
// ([A f][R f][A e][R e] [A d][R d][A c][R c] [A b][R b][A a][R a] [A 9][R 9][A 8][R 8]) -> ([A f][A f][R f][R f] [A e][A e][R e][R e] [A d][A d][R d][R d] [A c][A c][R c][R c])
const __m128i aarr11 = _mm_unpackhi_epi8(ar1, ar1);
// Move A right 16 bits and mask off everything but the lowest 8 bits to get A in its final place:
const __m128i ___a00 = _mm_and_si128(_mm_srli_epi32(aarr00, 16), kMask_x000f);
// Move R left 16 bits and mask off everything but the highest 8 bits to get R in its final place:
const __m128i r___00 = _mm_and_si128(_mm_slli_epi32(aarr00, 16), kMask_xf000);
// OR the two together to get R and A in their final places:
const __m128i r__a00 = _mm_or_si128(r___00, ___a00);
const __m128i ___a01 = _mm_and_si128(_mm_srli_epi32(aarr01, 16), kMask_x000f);
const __m128i r___01 = _mm_and_si128(_mm_slli_epi32(aarr01, 16), kMask_xf000);
const __m128i r__a01 = _mm_or_si128(r___01, ___a01);
const __m128i ___a10 = _mm_and_si128(_mm_srli_epi32(aarr10, 16), kMask_x000f);
const __m128i r___10 = _mm_and_si128(_mm_slli_epi32(aarr10, 16), kMask_xf000);
const __m128i r__a10 = _mm_or_si128(r___10, ___a10);
const __m128i ___a11 = _mm_and_si128(_mm_srli_epi32(aarr11, 16), kMask_x000f);
const __m128i r___11 = _mm_and_si128(_mm_slli_epi32(aarr11, 16), kMask_xf000);
const __m128i r__a11 = _mm_or_si128(r___11, ___a11);
// Expand the GB components to fill out 32-bit words:
// ([G 7][B 7][G 6][B 6] [G 5][B 5][G 4][B 4] [G 3][B 3][G 2][B 2] [G 1][B 1][G 0][B 0]) -> ([G 3][G 3][B 3][B 3] [G 2][G 2][B 2][B 2] [G 1][G 1][B 1][B 1] [G 0][G 0][B 0][B 0])
const __m128i ggbb00 = _mm_unpacklo_epi8(gb0, gb0);
// ([G 7][B 7][G 6][B 6] [G 5][B 5][G 4][B 4] [G 3][B 3][G 2][B 2] [G 1][B 1][G 0][B 0]) -> ([G 7][G 7][B 7][B 7] [G 6][G 6][B 6][B 6] [G 5][G 5][B 5][B 5] [G 4][G 4][B 4][B 4])
const __m128i ggbb01 = _mm_unpackhi_epi8(gb0, gb0);
// ([G f][B f][G e][B e] [G d][B d][G c][B c] [G b][B b][G a][B a] [G 9][B 9][G 8][B 8]) -> ([G b][G b][B b][B b] [G a][G a][B a][B a] [G 9][G 9][B 9][B 9] [G 8][G 8][B 8][B 8])
const __m128i ggbb10 = _mm_unpacklo_epi8(gb1, gb1);
// ([G f][B f][G e][B e] [G d][B d][G c][B c] [G b][B b][G a][B a] [G 9][B 9][G 8][B 8]) -> ([G f][G f][B f][B f] [G e][G e][B e][B e] [G d][G d][B d][B d] [G c][G c][B c][B c])
const __m128i ggbb11 = _mm_unpackhi_epi8(gb1, gb1);
// G and B are already in perfect spots in the center, just remove the extra copies in the 1st and 4th positions:
const __m128i _gb_00 = _mm_and_si128(ggbb00, kMask_x0ff0);
const __m128i _gb_01 = _mm_and_si128(ggbb01, kMask_x0ff0);
const __m128i _gb_10 = _mm_and_si128(ggbb10, kMask_x0ff0);
const __m128i _gb_11 = _mm_and_si128(ggbb11, kMask_x0ff0);
// Now join up R__A and _GB_ to get RGBA!
const __m128i rgba00 = _mm_or_si128(r__a00, _gb_00);
const __m128i rgba01 = _mm_or_si128(r__a01, _gb_01);
const __m128i rgba10 = _mm_or_si128(r__a10, _gb_10);
const __m128i rgba11 = _mm_or_si128(r__a11, _gb_11);
// Write em out!
__m128i *dst128 = (__m128i*)( dst + (y + 0) * width + x );
_mm_store_si128(dst128, rgba00);
dst128 = (__m128i*)( dst + (y + 1) * width + x );
_mm_store_si128(dst128, rgba01);
dst128 = (__m128i*)( dst + (y + 2) * width + x );
_mm_store_si128(dst128, rgba10);
dst128 = (__m128i*)( dst + (y + 3) * width + x );
_mm_store_si128(dst128, rgba11);
}
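// (Each iteration consumes one complete 64-byte tile: 4x4 texels, the 32-byte
// AR half followed by the 32-byte GB half, and writes four 16-byte rows, so
// dst rows must be 16-byte aligned for the _mm_store_si128 calls above.)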
#if 0
// Reference C implementation.
for (int y = 0; y < height; y += 4)
for (int x = 0; x < width; x += 4)
{
@@ -1432,6 +1630,7 @@ PC_TexFormat TexDecoder_Decode_RGBA(u32 * dst, const u8 * src, int width, int he
decodebytesARGB8_4ToRgba(dst + (y+iy)*width + x, (u16*)src + 4 * iy, (u16*)src + 4 * iy + 16);
src += 64;
}
#endif
}
break;
case GX_TF_CMPR: // speed critical