/**
 * GLSL fragment shader source for SAO (Scalable Ambient Obscurance),
 * embedded as a string constant. Structurally derived from three.js's
 * SAOShader, extended with two screen-space normal reconstruction paths
 * (`viewNormalImproved` and `viewNormalAccurate`, after the technique at
 * https://wickedengine.net/2019/09/22/improved-normal-reconstruction-from-depth/)
 * selected at shader-compile time.
 *
 * Compile-time defines the shader text consumes:
 * DIFFUSE_TEXTURE, NORMAL_TEXTURE, DEPTH_PACKING, PERSPECTIVE_CAMERA,
 * IMPROVED_NORMAL_RECONSTRUCTION, ACCURATE_NORMAL_RECONSTRUCTION,
 * OUTPUT_RECONSTRUCTED_NORMALS, NUM_RINGS, NUM_SAMPLES.
 *
 * Uniforms declared in the shader: tDiffuse, tDepth, tNormal, cameraNear,
 * cameraFar, cameraProjectionMatrix, cameraInverseProjectionMatrix, scale,
 * intensity, bias, kernelRadius, minResolution, size, randomSeed.
 *
 * NOTE(review): the two `#include ` directives in the string have EMPTY
 * targets — presumably the three.js chunk names (`<common>`, `<packing>`:
 * the shader calls unpackRGBAToDepth / perspectiveDepthToViewZ /
 * packNormalToRGB / pow2 / rand, which those chunks provide) were stripped
 * as angle-bracket markup during generation of this declaration file.
 * Verify against the implementing .ts source before relying on this text.
 *
 * NOTE(review): this is a generated-looking .d.ts value; the string below
 * is intentionally left byte-identical, including its hard line wraps.
 */
export declare const speckleSaoFrag = "\n\t\t#include \n\t\tvarying vec2 vUv;\n\t\t#if DIFFUSE_TEXTURE == 1\n\t\tuniform sampler2D tDiffuse;\n\t\t#endif\n\t\tuniform sampler2D tDepth;\n\t\t#if NORMAL_TEXTURE == 1\n\t\tuniform sampler2D tNormal;\n\t\t#endif\n\t\tuniform float cameraNear;\n\t\tuniform float cameraFar;\n\t\tuniform mat4 cameraProjectionMatrix;\n\t\tuniform mat4 cameraInverseProjectionMatrix;\n\t\tuniform float scale;\n\t\tuniform float intensity;\n\t\tuniform float bias;\n\t\tuniform float kernelRadius;\n\t\tuniform float minResolution;\n\t\tuniform vec2 size;\n\t\tuniform float randomSeed;\n\t\t// RGBA depth\n\t\t#include \n\t\tvec4 getDefaultColor( const in vec2 screenPosition ) {\n\t\t\t#if DIFFUSE_TEXTURE == 1\n\t\t\treturn texture2D( tDiffuse, vUv );\n\t\t\t#else\n\t\t\treturn vec4( 1.0 );\n\t\t\t#endif\n\t\t}\n\t\tfloat getDepth( const in vec2 screenPosition ) {\n\t\t\t#if DEPTH_PACKING == 1\n\t\t\treturn unpackRGBAToDepth( texture2D( tDepth, screenPosition ) );\n\t\t\t#else\n\t\t\treturn texture2D( tDepth, screenPosition ).x;\n\t\t\t#endif\n\t\t}\n\t\tfloat getViewZ( const in float depth ) {\n\t\t\t#if PERSPECTIVE_CAMERA == 1\n\t\t\treturn perspectiveDepthToViewZ( depth, cameraNear, cameraFar );\n\t\t\t#else\n\t\t\treturn orthographicDepthToViewZ( depth, cameraNear, cameraFar );\n\t\t\t#endif\n\t\t}\n\t\tvec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {\n\t\t\tfloat clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];\n\t\t\tvec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );\n\t\t\tclipPosition *= clipW; // unprojection.\n\t\t\treturn ( cameraInverseProjectionMatrix * clipPosition ).xyz;\n\t\t}\n\n\t\t//https://wickedengine.net/2019/09/22/improved-normal-reconstruction-from-depth/\n\t\tvec3 viewNormalImproved(in vec2 uv, in vec3 origin)\n\t\t{\t\n\t\t\thighp vec2 dd = abs(vec2(1./size.x, 1./size.y));\n\t\t\thighp vec2 ddx = vec2(dd.x, 
0.);\n\t\t\thighp vec2 ddy = vec2(0., dd.y);\n\n\t\t\tfloat sampleDepth = getDepth( uv - ddy );\n\t\t\tfloat sampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 top = getViewPosition( uv - ddy, sampleDepth, sampleViewZ );\n\n\t\t\tsampleDepth = getDepth( uv + ddy );\n\t\t\tsampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 bottom = getViewPosition( uv + ddy, sampleDepth, sampleViewZ );\n\n\t\t\thighp vec3 center = origin;\n\t\t\t\n\t\t\tsampleDepth = getDepth( uv - ddx );\n\t\t\tsampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 left = getViewPosition( uv - ddx, sampleDepth, sampleViewZ );\n\n\t\t\tsampleDepth = getDepth( uv + ddx );\n\t\t\tsampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 right = getViewPosition( uv + ddx, sampleDepth, sampleViewZ );\n\n\t\t\t // get the difference between the current and each offset position\n\t\t\tvec3 l = center - left;\n\t\t\tvec3 r = right - center;\n\t\t\tvec3 d = center - top;\n\t\t\tvec3 u = bottom - center;\n\n\t\t\t// pick horizontal and vertical diff with the smallest z difference\n\t\t\tvec3 hDeriv = abs(l.z) < abs(r.z) ? l : r;\n\t\t\tvec3 vDeriv = abs(d.z) < abs(u.z) ? 
d : u;\n\n\t\t\t// get view space normal from the cross product of the two smallest offsets\n\t\t\tvec3 viewNormal = normalize(cross(hDeriv, vDeriv));\n\n\t\t\treturn viewNormal;\n\t\t}\n\n\t\tvec3 viewNormalAccurate(in vec2 uv, in vec3 origin, in float centerDepth) {\n\t\t\thighp vec2 dd = abs(vec2(1./size.x, 1./size.y));\n\t\t\thighp vec2 ddx = vec2(dd.x, 0.);\n\t\t\thighp vec2 ddy = vec2(0., dd.y);\n\n\t\t\tfloat sampleDepth = getDepth( uv - ddy );\n\t\t\tfloat sampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 top = getViewPosition( uv - ddy, sampleDepth, sampleViewZ );\n\n\t\t\tsampleDepth = getDepth( uv + ddy );\n\t\t\tsampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 bottom = getViewPosition( uv + ddy, sampleDepth, sampleViewZ );\n\n\t\t\thighp vec3 center = origin;\n\t\t\t\n\t\t\tsampleDepth = getDepth( uv - ddx );\n\t\t\tsampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 left = getViewPosition( uv - ddx, sampleDepth, sampleViewZ );\n\n\t\t\tsampleDepth = getDepth( uv + ddx );\n\t\t\tsampleViewZ = getViewZ( sampleDepth );\n\t\t\thighp vec3 right = getViewPosition( uv + ddx, sampleDepth, sampleViewZ );\n\n\t\t\t // get the difference between the current and each offset position\n\t\t\tvec3 l = center - left;\n\t\t\tvec3 r = right - center;\n\t\t\tvec3 d = center - top;\n\t\t\tvec3 u = bottom - center;\n\n\t\t\t// get depth values at 1 & 2 pixels offsets from current along the horizontal axis\n\t\t\tvec4 H = vec4(\n\t\t\t\tgetDepth(uv - ddx),\n\t\t\t\tgetDepth(uv + ddx),\n\t\t\t\tgetDepth(uv - 2. * ddx),\n\t\t\t\tgetDepth(uv + 2. * ddx)\n\t\t\t);\n\n\t\t\t// get depth values at 1 & 2 pixels offsets from current along the vertical axis\n\t\t\tvec4 V = vec4(\n\t\t\t\tgetDepth(uv - ddy),\n\t\t\t\tgetDepth(uv + ddy),\n\t\t\t\tgetDepth(uv - 2. * ddy),\n\t\t\t\tgetDepth(uv + 2. 
* ddy)\n\t\t\t);\n\n\t\t\t// current pixel's depth difference from slope of offset depth samples\n\t\t\t// differs from original article because we're using non-linear depth values\n\t\t\t// see article's comments\n\t\t\tvec2 he = abs((2. * H.xy - H.zw) - centerDepth);\n\t\t\tvec2 ve = abs((2. * V.xy - V.zw) - centerDepth);\n\n\t\t\t// pick horizontal and vertical diff with the smallest depth difference from slopes\n\t\t\tvec3 hDeriv = he.x < he.y ? l : r;\n\t\t\tvec3 vDeriv = ve.x < ve.y ? d : u;\n\n\t\t\t// get view space normal from the cross product of the best derivatives\n\t\t\tvec3 viewNormal = normalize(cross(hDeriv, vDeriv));\n\n\t\t\treturn viewNormal;\n\n\t\t}\n\n\t\tvec3 getViewNormal( const in vec3 viewPosition, const in vec2 screenPosition, in float centerDepth ) {\n\t\t\t#if NORMAL_TEXTURE == 1\n\t\t\t\treturn unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );\n\t\t\t#elif IMPROVED_NORMAL_RECONSTRUCTION == 1\n\t\t\t\treturn viewNormalImproved(screenPosition, viewPosition);\n\t\t\t#elif ACCURATE_NORMAL_RECONSTRUCTION == 1\n\t\t\t\treturn viewNormalAccurate(screenPosition, viewPosition, centerDepth);\n\t\t\t#else\n\t\t\t\treturn normalize( cross( dFdx( viewPosition ), dFdy( viewPosition ) ) );\n\t\t\t#endif\n\t\t}\n\n\t\tfloat scaleDividedByCameraFar;\n\t\tfloat minResolutionMultipliedByCameraFar;\n\t\tfloat getOcclusion( const in vec3 centerViewPosition, const in vec3 centerViewNormal, const in vec3 sampleViewPosition ) {\n\t\t\tvec3 viewDelta = sampleViewPosition - centerViewPosition;\n\t\t\tfloat viewDistance = length( viewDelta );\n\t\t\tfloat scaledScreenDistance = scaleDividedByCameraFar * viewDistance;\n\t\t\treturn max(0.0, (dot(centerViewNormal, viewDelta) - minResolutionMultipliedByCameraFar) / scaledScreenDistance - bias) / (1.0 + pow2( scaledScreenDistance ) );\n\t\t}\n\t\t// moving costly divides into consts\n\t\tconst float ANGLE_STEP = PI2 * float( NUM_RINGS ) / float( NUM_SAMPLES );\n\t\tconst float INV_NUM_SAMPLES = 1.0 / 
float( NUM_SAMPLES );\n\t\tfloat getAmbientOcclusion( const in vec3 centerViewPosition, in float centerDepth ) {\n\t\t\t// precompute some variables require in getOcclusion.\n\t\t\tscaleDividedByCameraFar = scale / cameraFar;\n\t\t\tminResolutionMultipliedByCameraFar = minResolution * cameraFar;\n\t\t\tvec3 centerViewNormal = getViewNormal( centerViewPosition, vUv, centerDepth );\n\t\t\t// jsfiddle that shows sample pattern: https://jsfiddle.net/a16ff1p7/\n\t\t\tfloat angle = rand( vUv + randomSeed ) * PI2;\n\t\t\tvec2 radius = vec2( kernelRadius * INV_NUM_SAMPLES ) / size;\n\t\t\tvec2 radiusStep = radius;\n\t\t\tfloat occlusionSum = 0.0;\n\t\t\tfloat weightSum = 0.0;\n\t\t\tfor( int i = 0; i < NUM_SAMPLES; i ++ ) {\n\t\t\t\tvec2 sampleUv = vUv + vec2( cos( angle ), sin( angle ) ) * radius;\n\t\t\t\tradius += radiusStep;\n\t\t\t\tangle += ANGLE_STEP;\n\t\t\t\tfloat sampleDepth = getDepth( sampleUv );\n\t\t\t\tif( sampleDepth >= ( 1.0 - EPSILON ) ) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tfloat sampleViewZ = getViewZ( sampleDepth );\n\t\t\t\tvec3 sampleViewPosition = getViewPosition( sampleUv, sampleDepth, sampleViewZ );\n\t\t\t\tocclusionSum += getOcclusion( centerViewPosition, centerViewNormal, sampleViewPosition );\n\t\t\t\tweightSum += 1.0;\n\t\t\t}\n\t\t\tif( weightSum == 0.0 ) discard;\n\t\t\treturn occlusionSum * ( intensity / weightSum );\n\t\t}\n\t\tvoid main() {\n\t\t\tfloat centerDepth = getDepth( vUv );\n\t\t\tif( centerDepth >= ( 1.0 - EPSILON ) ) {\n\t\t\t\tdiscard;\n\t\t\t}\n\t\t\tfloat centerViewZ = getViewZ( centerDepth );\n\t\t\tvec3 viewPosition = getViewPosition( vUv, centerDepth, centerViewZ );\n\n\t\t\t#ifdef OUTPUT_RECONSTRUCTED_NORMALS\n\t\t\t\tvec3 normal;\n\t\t\t\t#if IMPROVED_NORMAL_RECONSTRUCTION == 1\n\t\t\t\t\tnormal = viewNormalImproved(vUv, viewPosition);\n\t\t\t\t#elif ACCURATE_NORMAL_RECONSTRUCTION == 1\n\t\t\t\t\tnormal = viewNormalAccurate(vUv, viewPosition, centerDepth);\n\t\t\t\t#else\n\t\t\t\t\tnormal = normalize( cross( 
dFdx( viewPosition ), dFdy( viewPosition ) ) );\n\t\t\t\t#endif\n\t\t\t\tgl_FragColor.rgb = packNormalToRGB(normal);\n\t\t\t\tgl_FragColor.a = 1.;\n\t\t\t\treturn;\n\t\t\t#endif\n\t\t\t\n\t\t\tfloat ambientOcclusion = getAmbientOcclusion( viewPosition, centerDepth );\n\t\t\tgl_FragColor = getDefaultColor( vUv );\n\t\t\tgl_FragColor.xyz *= 1. - ambientOcclusion;\n\t\t}";