cemu_graphic_packs/Enhancements/Splatoon_Shadows/b4196905d6fff2bb_0000000000001e69_ps.txt
Crementif 306da0b802
Update every graphic pack to V4
Since updating 300+ shaders by hand isn't feasible and the process could be automated, I took the honor of writing a script that automatically converts all of the shaders to be cross-compatible with Vulkan, and of course bumps the graphic pack versions to version 4.

The script also contains some nifty testing code that compiled every shader for both OpenGL and Vulkan; see the details I've written below for that.

**Here's the script that I've made to do all of this. No manual edits were needed:**
https://gist.github.com/Crementif/8d98a855b95f219d95298fb3db99deae
2019-11-29 04:36:05 +01:00
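
The core of the conversion is mechanical. Here is a minimal before/after sketch, reconstructed from the converted shader below (the `ufBlock` name and the set/binding numbers are taken from that output, not from the script itself): plain OpenGL uniforms get wrapped in a Vulkan uniform block behind an `#ifdef VULKAN` guard, and sampler declarations are routed through the `TEXTURE_LAYOUT` macro so each backend gets its own binding model.

```glsl
// Before (v3, OpenGL-only):
uniform ivec4 uf_remappedPS[12];
uniform vec2 uf_fragCoordScale;
layout(binding = 0) uniform sampler2D textureUnitPS0;

// After (v4, cross-compatible with Vulkan):
#ifdef VULKAN
layout(set = 1, binding = 3) uniform ufBlock
{
uniform ivec4 uf_remappedPS[12];
uniform vec4 uf_fragCoordScale;
};
#else
uniform ivec4 uf_remappedPS[12];
uniform vec2 uf_fragCoordScale;
#endif
TEXTURE_LAYOUT(0, 1, 0) uniform sampler2D textureUnitPS0;
```

Note that the Vulkan path also widens uf_fragCoordScale from vec2 to vec4, matching the two GET_FRAGCOORD definitions in the preamble.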


#version 420
#extension GL_ARB_texture_gather : enable
#extension GL_ARB_separate_shader_objects : enable
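// Cross-compatibility preamble emitted by the conversion script: the macros map OpenGL binding points
// to Vulkan descriptor sets/bindings, remap clip-space depth from OpenGL's [-1,1] to Vulkan's [0,1]
// (SET_POSITION), scale gl_FragCoord by uf_fragCoordScale (GET_FRAGCOORD), and alias the renamed vertex
// built-ins. The preamble is shared between shader stages, so not every macro is used in this fragment shader.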
#ifdef VULKAN
#define ATTR_LAYOUT(__vkSet, __location) layout(set = __vkSet, location = __location)
#define UNIFORM_BUFFER_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(set = __vkSet, binding = __vkLocation, std140)
#define TEXTURE_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(set = __vkSet, binding = __vkLocation)
#define SET_POSITION(_v) gl_Position = _v; gl_Position.z = (gl_Position.z + gl_Position.w) / 2.0
#define GET_FRAGCOORD() vec4(gl_FragCoord.xy*uf_fragCoordScale.xy,gl_FragCoord.zw)
#define gl_VertexID gl_VertexIndex
#define gl_InstanceID gl_InstanceIndex
#else
#define ATTR_LAYOUT(__vkSet, __location) layout(location = __location)
#define UNIFORM_BUFFER_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(binding = __glLocation, std140)
#define TEXTURE_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(binding = __glLocation)
#define SET_POSITION(_v) gl_Position = _v
#define GET_FRAGCOORD() vec4(gl_FragCoord.xy*uf_fragCoordScale,gl_FragCoord.zw)
#endif
// This shader was auto-converted from an OpenGL-only shader to be cross-compatible with Vulkan, so expect weird code and possible errors.
// shader b4196905d6fff2bb
// Used for: Fixing high-res shadows on models at end-screen of campaign/matches
const float resScale = $shadowRes;
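// $shadowRes is substituted by Cemu from the graphic pack preset; the texel offsets read from
// uf_remappedPS[0] below are divided by resScale, presumably so the shadow filter keeps the same
// footprint when the shadow map resolution is scaled up.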
#ifdef VULKAN
layout(set = 1, binding = 3) uniform ufBlock
{
uniform ivec4 uf_remappedPS[12];
uniform vec4 uf_fragCoordScale;
};
#else
uniform ivec4 uf_remappedPS[12];
uniform vec2 uf_fragCoordScale;
#endif
TEXTURE_LAYOUT(0, 1, 0) uniform sampler2D textureUnitPS0;// Tex0 addr 0xf4240800 res 1280x720x1 dim 1 tm: 4 format 080e compSel: 0 4 4 5 mipView: 0x0 (num 0x1
TEXTURE_LAYOUT(1, 1, 1) uniform sampler2DArrayShadow textureUnitPS1;// Tex1 addr 0xf494a800 res 2048x2048x1 dim 5 tm: 4 format 0005 compSel: 0 4 4 5 mipView: 0x0 (num 0x1
TEXTURE_LAYOUT(2, 1, 2) uniform sampler2D textureUnitPS2;// Tex2 addr 0x17f95000 res 4x4x1 dim 1 tm: 2 format 001a compSel: 0 1 2 3 mipView: 0x0 (num 0x1
layout(location = 0) noperspective in vec4 passParameterSem0;
layout(location = 1) noperspective in vec4 passParameterSem4;
layout(location = 0) out vec4 passPixelColor0;
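// Clamps a float passed as its integer bit pattern to [0.0, 1.0] and returns the result's bit pattern;
// the sentinels 0x7FFFFFFF and 0xFFFFFFFF map to 1.0 and 0.0 respectively.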
int clampFI32(int v)
{
if( v == 0x7FFFFFFF )
return floatBitsToInt(1.0);
else if( v == 0xFFFFFFFF )
return floatBitsToInt(0.0);
return floatBitsToInt(clamp(intBitsToFloat(v), 0.0, 1.0));
}
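// Multiply where 0 * anything == 0, mirroring the console GPU's non-IEEE multiply (so 0 * Inf/NaN doesn't produce NaN).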
float mul_nonIEEE(float a, float b){ if( a == 0.0 || b == 0.0 ) return 0.0; return a*b; }
void main()
{
vec4 R0f = vec4(0.0);
vec4 R1f = vec4(0.0);
vec4 R2f = vec4(0.0);
vec4 R3f = vec4(0.0);
vec4 R4f = vec4(0.0);
vec4 R5f = vec4(0.0);
vec4 R6f = vec4(0.0);
vec4 R123f = vec4(0.0);
vec4 R124f = vec4(0.0);
vec4 R125f = vec4(0.0);
vec4 R126f = vec4(0.0);
vec4 R127f = vec4(0.0);
float backupReg0f, backupReg1f, backupReg2f, backupReg3f, backupReg4f;
vec4 PV0f = vec4(0.0), PV1f = vec4(0.0);
float PS0f = 0.0, PS1f = 0.0;
vec4 tempf = vec4(0.0);
float tempResultf;
int tempResulti;
ivec4 ARi = ivec4(0);
bool predResult = true;
vec3 cubeMapSTM;
int cubeMapFaceId;
R0f = passParameterSem0;
R1f = passParameterSem4;
R0f.w = (texture(textureUnitPS0, R1f.xy).x);
// 0
R125f.x = 1.0;
PV0f.y = 0.0; // = "fix"
PV0f.y /= 2.0;
PV0f.z = intBitsToFloat(uf_remappedPS[0].z);
PV0f.z /= 2.0;
R123f.w = (mul_nonIEEE(-(R0f.w),intBitsToFloat(uf_remappedPS[1].y)) + 1.0);
PV0f.w = R123f.w;
R127f.x = -(intBitsToFloat(uf_remappedPS[0].z)/resScale);
R127f.x /= 2.0;
PS0f = R127f.x;
// 1
R126f.x = 0.0;
R127f.y = mul_nonIEEE(intBitsToFloat(uf_remappedPS[0].x)/resScale, PV0f.y);
R126f.z = mul_nonIEEE(intBitsToFloat(uf_remappedPS[0].x)/resScale, PV0f.z);
PV1f.w = -(intBitsToFloat(uf_remappedPS[0].w)/resScale);
PV1f.w /= 2.0;
PS1f = 1.0 / PV0f.w;
// 2
backupReg0f = R127f.x;
R127f.x = mul_nonIEEE(intBitsToFloat(uf_remappedPS[0].x)/resScale, backupReg0f);
PV0f.y = -(intBitsToFloat(uf_remappedPS[1].x)) * PS1f;
R1f.z = roundEven(0.0);
PV0f.z = R1f.z;
R126f.w = mul_nonIEEE(intBitsToFloat(uf_remappedPS[0].x)/resScale, PV1f.w);
// 3
R5f.x = mul_nonIEEE(R0f.x, PV0f.y);
PV1f.x = R5f.x;
R5f.y = mul_nonIEEE(R0f.y, PV0f.y);
PV1f.y = R5f.y;
R124f.z = PV0f.y;
PV1f.z = R124f.z;
R125f.w = -(PV0f.y) + -(intBitsToFloat(uf_remappedPS[2].x));
R2f.z = PV0f.z;
PS1f = R2f.z;
// 4
tempf.x = dot(vec4(PV1f.x,PV1f.y,PV1f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[3].x),intBitsToFloat(uf_remappedPS[3].y),intBitsToFloat(uf_remappedPS[3].z),intBitsToFloat(uf_remappedPS[3].w)));
PV0f.x = tempf.x;
PV0f.y = tempf.x;
PV0f.z = tempf.x;
PV0f.w = tempf.x;
R3f.z = R1f.z;
PS0f = R3f.z;
// 5
tempf.x = dot(vec4(R5f.x,R5f.y,R124f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[4].x),intBitsToFloat(uf_remappedPS[4].y),intBitsToFloat(uf_remappedPS[4].z),intBitsToFloat(uf_remappedPS[4].w)));
PV1f.x = tempf.x;
PV1f.y = tempf.x;
PV1f.z = tempf.x;
PV1f.w = tempf.x;
R125f.z = 1.0 / PV0f.x;
PS1f = R125f.z;
// 6
tempf.x = dot(vec4(R5f.x,R5f.y,R124f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[5].x),intBitsToFloat(uf_remappedPS[5].y),intBitsToFloat(uf_remappedPS[5].z),intBitsToFloat(uf_remappedPS[5].w)));
PV0f.x = tempf.x;
PV0f.y = tempf.x;
PV0f.z = tempf.x;
PV0f.w = tempf.x;
R127f.z = 1.0 / PV1f.x;
PS0f = R127f.z;
// 7
tempf.x = dot(vec4(R5f.x,R5f.y,R124f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[6].x),intBitsToFloat(uf_remappedPS[6].y),intBitsToFloat(uf_remappedPS[6].z),intBitsToFloat(uf_remappedPS[6].w)));
PV1f.x = tempf.x;
PV1f.y = tempf.x;
PV1f.z = tempf.x;
PV1f.w = tempf.x;
R127f.w = PV0f.x * R125f.z;
PS1f = R127f.w;
// 8
tempf.x = dot(vec4(R5f.x,R5f.y,R124f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[7].x),intBitsToFloat(uf_remappedPS[7].y),intBitsToFloat(uf_remappedPS[7].z),intBitsToFloat(uf_remappedPS[7].w)));
PV0f.x = tempf.x;
PV0f.y = tempf.x;
PV0f.z = tempf.x;
PV0f.w = tempf.x;
R0f.x = mul_nonIEEE(PV1f.x, R127f.z);
PS0f = R0f.x;
// 9
backupReg0f = R127f.x;
R127f.x = mul_nonIEEE(R127f.w, R127f.y);
PV1f.x = R127f.x;
R0f.y = mul_nonIEEE(PV0f.x, R127f.z);
R127f.z = R127f.w + R126f.x;
R124f.w = mul_nonIEEE(R127f.w, R126f.z);
R126f.x = mul_nonIEEE(R127f.w, backupReg0f);
PS1f = R126f.x;
// 10
tempf.x = dot(vec4(R5f.x,R5f.y,R124f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[8].x),intBitsToFloat(uf_remappedPS[8].y),intBitsToFloat(uf_remappedPS[8].z),intBitsToFloat(uf_remappedPS[8].w)));
PV0f.x = tempf.x;
PV0f.y = tempf.x;
PV0f.z = tempf.x;
PV0f.w = tempf.x;
R126f.z = PV1f.x;
PS0f = R126f.z;
// 11
tempf.x = dot(vec4(R5f.x,R5f.y,R124f.z,R125f.x),vec4(intBitsToFloat(uf_remappedPS[9].x),intBitsToFloat(uf_remappedPS[9].y),intBitsToFloat(uf_remappedPS[9].z),intBitsToFloat(uf_remappedPS[9].w)));
PV1f.x = tempf.x;
PV1f.y = tempf.x;
PV1f.z = tempf.x;
PV1f.w = tempf.x;
R127f.y = PV0f.x * R125f.z;
PS1f = R127f.y;
// 12
R124f.x = PV1f.x * R125f.z;
PV0f.x = R124f.x;
R126f.y = mul_nonIEEE(R127f.w, R126f.w);
R4f.z = R1f.z;
R127f.w = R127f.z;
R127f.w = clamp(R127f.w, 0.0, 1.0);
PV0f.w = R127f.w;
R1f.x = PS1f + R124f.w;
PS0f = R1f.x;
// 13
R2f.x = R127f.y + R126f.x;
R1f.y = PV0f.x + R126f.z;
R5f.z = mul_nonIEEE(intBitsToFloat(uf_remappedPS[2].z), R125f.w);
R5f.z = clamp(R5f.z, 0.0, 1.0);
R1f.w = PV0f.w;
R2f.y = PV0f.x + R127f.x;
PS1f = R2f.y;
// 14
R3f.x = R127f.y + R124f.w;
R3f.y = R124f.x + R126f.y;
PV0f.z = mul_nonIEEE(R125f.x, intBitsToFloat(uf_remappedPS[10].w));
R2f.w = R127f.w;
R3f.w = R127f.w;
PS0f = R3f.w;
// 15
R4f.x = R127f.y + R126f.x;
R4f.y = R124f.x + R126f.y;
R0f.z = (mul_nonIEEE(R124f.z,intBitsToFloat(uf_remappedPS[10].z)) + PV0f.z);
R4f.w = R127f.w;
R6f.x = intBitsToFloat(uf_remappedPS[11].x);
PS1f = R6f.x;
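// One fetch from the 4x4 lookup texture (textureUnitPS2) plus four comparison taps into the
// shadow map array (textureUnitPS1); the four taps are averaged further down.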
R0f.xy = (texture(textureUnitPS2, R0f.xy).xy);
R1f.z = (texture(textureUnitPS1, vec4(R1f.x,R1f.y,R1f.z,R1f.w)));
R2f.y = (texture(textureUnitPS1, vec4(R2f.x,R2f.y,R2f.z,R2f.w)));
R3f.x = (texture(textureUnitPS1, vec4(R3f.x,R3f.y,R3f.z,R3f.w)));
R4f.w = (texture(textureUnitPS1, vec4(R4f.x,R4f.y,R4f.z,R4f.w)));
// 0
backupReg0f = R0f.z;
backupReg1f = R0f.x;
backupReg1f = R0f.x;
backupReg2f = R0f.y;
tempf.x = dot(vec4(R5f.x,R5f.y,backupReg0f,0.0),vec4(intBitsToFloat(uf_remappedPS[10].x),intBitsToFloat(uf_remappedPS[10].y),1.0,0.0));
PV0f.x = tempf.x;
PV0f.y = tempf.x;
PV0f.z = tempf.x;
PV0f.w = tempf.x;
R127f.w = (mul_nonIEEE(-(backupReg1f),backupReg1f) + backupReg2f);
PS0f = R127f.w;
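// R127f.w = y - x*x; if textureUnitPS2 holds depth moments (mean, mean of squares), this is the
// depth variance, variance-shadow-map style.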
// 1
PV1f.z = R1f.z + R2f.y;
PV1f.z /= 2.0;
PV1f.w = min(PV0f.x, 1.0);
R6f.w = intBitsToFloat(uf_remappedPS[11].w);
PS1f = R6f.w;
// 2
R123f.y = (R3f.x * 0.5 + PV1f.z);
PV0f.y = R123f.y;
PV0f.z = PV1f.w + -(R0f.x);
// 3
R123f.x = (R4f.w * 0.5 + PV0f.y)/2.0;
PV1f.x = R123f.x;
R123f.y = (mul_nonIEEE(PV0f.z,PV0f.z) + R127f.w);
PV1f.y = R123f.y;
R124f.z = (0.0 >= PV0f.z)?1.0:0.0;
// 4
R6f.y = R5f.z + PV1f.x;
PS0f = 1.0 / PV1f.y;
// 5
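// variance / (variance + d^2): Chebyshev-style upper bound on the shadow term, later clamped to at
// most 1.0 and forced to full light when the receiver lies in front of the mean depth (R124f.z).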
PV1f.w = R127f.w * PS0f;
// 6
PV0f.y = max(PV1f.w, R124f.z);
// 7
PV1f.x = min(PV0f.y, 1.0);
// 8
R6f.z = R5f.z + PV1f.x;
// export
passPixelColor0 = vec4(R6f.x, R6f.y, R6f.z, R6f.w);
}