#version 420
#extension GL_ARB_texture_gather : enable
#extension GL_ARB_separate_shader_objects : enable
#ifdef VULKAN
#define ATTR_LAYOUT(__vkSet, __location) layout(set = __vkSet, location = __location)
#define UNIFORM_BUFFER_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(set = __vkSet, binding = __vkLocation, std140)
#define TEXTURE_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(set = __vkSet, binding = __vkLocation)
#define SET_POSITION(_v) gl_Position = _v; gl_Position.z = (gl_Position.z + gl_Position.w) / 2.0
#define GET_FRAGCOORD() vec4(gl_FragCoord.xy*uf_fragCoordScale.xy,gl_FragCoord.zw)
#define gl_VertexID gl_VertexIndex
#define gl_InstanceID gl_InstanceIndex
#else
#define ATTR_LAYOUT(__vkSet, __location) layout(location = __location)
#define UNIFORM_BUFFER_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(binding = __glLocation, std140)
#define TEXTURE_LAYOUT(__glLocation, __vkSet, __vkLocation) layout(binding = __glLocation)
#define SET_POSITION(_v) gl_Position = _v
#define GET_FRAGCOORD() vec4(gl_FragCoord.xy*uf_fragCoordScale,gl_FragCoord.zw)
#endif
// This shader was automatically converted to be cross-compatible with Vulkan and OpenGL.

// shader e2a33ddb22abbc43
// Used for: Horizontal shadow edge smoothing

float resXScale = float($width)/float($gameWidth);

#ifdef VULKAN
layout(set = 0, binding = 0) uniform ufBlock
{
uniform ivec4 uf_remappedVS[5];
};
#else
uniform ivec4 uf_remappedVS[5];
#endif
ATTR_LAYOUT(0, 0) in uvec4 attrDataSem0;
ATTR_LAYOUT(0, 1) in uvec4 attrDataSem5;
out gl_PerVertex
{
vec4 gl_Position;
float gl_PointSize;
};
layout(location = 0) out vec4 passParameterSem133;
layout(location = 1) out vec4 passParameterSem134;
layout(location = 2) out vec4 passParameterSem135;
int clampFI32(int v)
{
if( v == 0x7FFFFFFF )
	return floatBitsToInt(1.0);
else if( v == 0xFFFFFFFF )
	return floatBitsToInt(0.0);
return floatBitsToInt(clamp(intBitsToFloat(v), 0.0, 1.0));
}
float mul_nonIEEE(float a, float b){return mix(0.0, a*b, (a != 0.0) && (b != 0.0));}
void main()
{
vec4 R0f = vec4(0.0);
vec4 R1f = vec4(0.0);
vec4 R2f = vec4(0.0);
vec4 R3f = vec4(0.0);
vec4 R123f = vec4(0.0);
vec4 R125f = vec4(0.0);
vec4 R126f = vec4(0.0);
vec4 R127f = vec4(0.0);
uvec4 attrDecoder;
float backupReg0f, backupReg1f, backupReg2f, backupReg3f, backupReg4f;
vec4 PV0f = vec4(0.0), PV1f = vec4(0.0);
float PS0f = 0.0, PS1f = 0.0;
vec4 tempf = vec4(0.0);
float tempResultf;
int tempResulti;
ivec4 ARi = ivec4(0);
bool predResult = true;
vec3 cubeMapSTM;
int cubeMapFaceId;
R0f = intBitsToFloat(ivec4(gl_VertexID, 0, 0, gl_InstanceID));
// byte-swap the big-endian vertex attribute words before reinterpreting them as floats
attrDecoder.xyz = attrDataSem0.xyz;
attrDecoder.xyz = (attrDecoder.xyz>>24)|((attrDecoder.xyz>>8)&0xFF00)|((attrDecoder.xyz<<8)&0xFF0000)|((attrDecoder.xyz<<24));
attrDecoder.w = 0;
R1f = vec4(intBitsToFloat(int(attrDecoder.x)), intBitsToFloat(int(attrDecoder.y)), intBitsToFloat(int(attrDecoder.z)), intBitsToFloat(floatBitsToInt(1.0)));
attrDecoder.xy = attrDataSem5.xy;
attrDecoder.xy = (attrDecoder.xy>>24)|((attrDecoder.xy>>8)&0xFF00)|((attrDecoder.xy<<8)&0xFF0000)|((attrDecoder.xy<<24));
attrDecoder.z = 0;
attrDecoder.w = 0;
R2f = vec4(intBitsToFloat(int(attrDecoder.x)), intBitsToFloat(int(attrDecoder.y)), intBitsToFloat(floatBitsToInt(0.0)), intBitsToFloat(floatBitsToInt(1.0)));
// groups 0-3: transform the input position (R1f) by the matrix stored in uf_remappedVS[0..3]
// 0
PV0f.x = mul_nonIEEE(R1f.w, intBitsToFloat(uf_remappedVS[0].z));
PV0f.y = mul_nonIEEE(R1f.w, intBitsToFloat(uf_remappedVS[0].y));
PV0f.z = mul_nonIEEE(R1f.w, intBitsToFloat(uf_remappedVS[0].x));
PV0f.w = mul_nonIEEE(R1f.w, intBitsToFloat(uf_remappedVS[0].w));
R127f.x = 0.0;
PS0f = R127f.x;
// 1
R123f.x = (mul_nonIEEE(R1f.z,intBitsToFloat(uf_remappedVS[1].z)) + PV0f.x);
PV1f.x = R123f.x;
R123f.y = (mul_nonIEEE(R1f.z,intBitsToFloat(uf_remappedVS[1].y)) + PV0f.y);
PV1f.y = R123f.y;
R123f.z = (mul_nonIEEE(R1f.z,intBitsToFloat(uf_remappedVS[1].x)) + PV0f.z);
PV1f.z = R123f.z;
R123f.w = (mul_nonIEEE(R1f.z,intBitsToFloat(uf_remappedVS[1].w)) + PV0f.w);
PV1f.w = R123f.w;
R126f.y = 0.0;
PS1f = R126f.y;
// 2
R123f.x = (mul_nonIEEE(R1f.y,intBitsToFloat(uf_remappedVS[2].z)) + PV1f.x);
PV0f.x = R123f.x;
R123f.y = (mul_nonIEEE(R1f.y,intBitsToFloat(uf_remappedVS[2].y)) + PV1f.y);
PV0f.y = R123f.y;
R123f.z = (mul_nonIEEE(R1f.y,intBitsToFloat(uf_remappedVS[2].x)) + PV1f.z);
PV0f.z = R123f.z;
R123f.w = (mul_nonIEEE(R1f.y,intBitsToFloat(uf_remappedVS[2].w)) + PV1f.w);
PV0f.w = R123f.w;
R125f.y = 0.0;
PS0f = R125f.y;
// 3
backupReg0f = R1f.x;
backupReg0f = R1f.x;
backupReg0f = R1f.x;
backupReg0f = R1f.x;
R1f.x = (mul_nonIEEE(backupReg0f,intBitsToFloat(uf_remappedVS[3].x)) + PV0f.z);
R1f.y = (mul_nonIEEE(backupReg0f,intBitsToFloat(uf_remappedVS[3].y)) + PV0f.y);
R1f.z = (mul_nonIEEE(backupReg0f,intBitsToFloat(uf_remappedVS[3].z)) + PV0f.x);
R1f.w = (mul_nonIEEE(backupReg0f,intBitsToFloat(uf_remappedVS[3].w)) + PV0f.w);
R0f.w = R2f.y + R127f.x;
PS1f = R0f.w;
// groups 4-7: build the nine horizontal blur tap coordinates (-4..+4 times the texel step in
// uf_remappedVS[4].z; the bit patterns 0xc0400000 and 0x40400000 are -3.0 and 3.0), with the
// step divided by resXScale so the tap spacing accounts for the changed render resolution
// 4
R127f.x = intBitsToFloat(uf_remappedVS[4].z)/resXScale * -(1.0);
R127f.y = -(intBitsToFloat(uf_remappedVS[4].z)/resXScale);
R127f.y *= 2.0;
PV0f.z = -(intBitsToFloat(uf_remappedVS[4].z)/resXScale);
PV0f.z *= 4.0;
PV0f.w = intBitsToFloat(uf_remappedVS[4].z)/resXScale * intBitsToFloat(0xc0400000);
R126f.x = intBitsToFloat(uf_remappedVS[4].z)/resXScale * 1.0;
PS0f = R126f.x;
// 5
R125f.x = intBitsToFloat(uf_remappedVS[4].z)/resXScale * intBitsToFloat(0x40400000);
R0f.y = R2f.x + PV0f.w;
R127f.z = intBitsToFloat(uf_remappedVS[4].z)/resXScale;
R127f.z *= 4.0;
R127f.w = intBitsToFloat(uf_remappedVS[4].z)/resXScale;
R127f.w *= 2.0;
R0f.x = R2f.x + PV0f.z;
PS1f = R0f.x;
// 6
R3f.x = R2f.x + R127f.x;
R3f.y = R2f.x + R126f.y;
R0f.z = R2f.x + R127f.y;
R3f.w = R2f.y + R126f.y;
R3f.z = R2f.x + R126f.x;
PS0f = R3f.z;
// 7
backupReg0f = R2f.x;
backupReg0f = R2f.x;
backupReg0f = R2f.x;
backupReg1f = R2f.y;
R2f.xyz = vec3(backupReg0f,backupReg0f,backupReg0f) + vec3(R127f.w,R125f.x,R127f.z);
R2f.w = backupReg1f + R125f.y;
// export
SET_POSITION(vec4(R1f.x, R1f.y, R1f.z, R1f.w));
// export
passParameterSem133 = vec4(R0f.x, R0f.y, R0f.z, R0f.w);
// export
passParameterSem134 = vec4(R3f.x, R3f.y, R3f.z, R3f.w);
// export
passParameterSem135 = vec4(R2f.x, R2f.y, R2f.z, R2f.w);
// 0
}