@pmndrs/vanilla
Comparing version 1.5.2 to 1.6.0
@@ -1,1 +0,1 @@
"use strict";Object.defineProperty(exports,"__esModule",{value:!0});var e=require("./pcss.cjs.js"),r=require("./shaderMaterial.cjs.js");require("three"),exports.pcss=e.pcss,exports.shaderMaterial=r.shaderMaterial;
"use strict";Object.defineProperty(exports,"__esModule",{value:!0});var e=require("./pcss.cjs.js"),r=require("./shaderMaterial.cjs.js"),s=require("./AccumulativeShadows.cjs.js");require("three"),require("../materials/MeshDiscardMaterial.cjs.js"),exports.pcss=e.pcss,exports.shaderMaterial=r.shaderMaterial,exports.ProgressiveLightMap=s.ProgressiveLightMap,exports.SoftShadowMaterial=s.SoftShadowMaterial;
export * from './pcss';
export * from './shaderMaterial';
export * from './AccumulativeShadows';
export { pcss } from './pcss.js';
export { shaderMaterial } from './shaderMaterial.js';
export { ProgressiveLightMap, SoftShadowMaterial } from './AccumulativeShadows.js';
import 'three';
import '../materials/MeshDiscardMaterial.js';
@@ -1,1 +0,1 @@
"use strict";Object.defineProperty(exports,"__esModule",{value:!0});var e=require("three");function n(e){if(e&&e.__esModule)return e;var n=Object.create(null);return e&&Object.keys(e).forEach((function(t){if("default"!==t){var r=Object.getOwnPropertyDescriptor(e,t);Object.defineProperty(n,t,r.get?r:{enumerable:!0,get:function(){return e[t]}})}})),n.default=e,Object.freeze(n)}var t=n(e);function r(e,n,r,o){const i=class extends t.ShaderMaterial{constructor(i={}){const a=Object.entries(e);super({uniforms:a.reduce(((e,[n,r])=>({...e,...t.UniformsUtils.clone({[n]:{value:r}})})),{}),vertexShader:n,fragmentShader:r}),this.key="",a.forEach((([e])=>Object.defineProperty(this,e,{get:()=>this.uniforms[e].value,set:n=>this.uniforms[e].value=n}))),Object.assign(this,i),o&&o(this)}};return i.key=t.MathUtils.generateUUID(),i}const o=r({},"void main() { }","void main() { gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0); discard; }");class i extends t.MeshPhysicalMaterial{constructor({samples:e=6,transmissionSampler:n=!1,chromaticAberration:r=.05,transmission:o=0,_transmission:i=1,transmissionMap:a=null,roughness:s=0,thickness:l=0,thicknessMap:u=null,attenuationDistance:m=1/0,attenuationColor:c=new t.Color("white"),anisotropy:f=.1,time:h=0,distortion:v=0,distortionScale:d=.5,temporalDistortion:p=0,buffer:g=null}){super(),this.uniforms={chromaticAberration:{value:r},transmission:{value:o},_transmission:{value:i},transmissionMap:{value:a},roughness:{value:s},thickness:{value:l},thicknessMap:{value:u},attenuationDistance:{value:m},attenuationColor:{value:c},anisotropy:{value:f},time:{value:h},distortion:{value:v},distortionScale:{value:d},temporalDistortion:{value:p},buffer:{value:g}},this.onBeforeCompile=t=>{t.uniforms={...t.uniforms,...this.uniforms},n?t.defines.USE_SAMPLER="":t.defines.USE_TRANSMISSION="",t.fragmentShader="\n uniform float chromaticAberration; \n uniform float anisotropy; \n uniform float time;\n uniform float distortion;\n uniform float distortionScale;\n uniform float temporalDistortion;\n uniform sampler2D buffer;\n\n vec3 random3(vec3 c) {\n float j = 4096.0*sin(dot(c,vec3(17.0, 59.4, 15.0)));\n vec3 r;\n r.z = fract(512.0*j);\n j *= .125;\n r.x = fract(512.0*j);\n j *= .125;\n r.y = fract(512.0*j);\n return r-0.5;\n }\n\n float seed = 0.0;\n uint hash( uint x ) {\n x += ( x << 10u );\n x ^= ( x >> 6u );\n x += ( x << 3u );\n x ^= ( x >> 11u );\n x += ( x << 15u );\n return x;\n }\n\n // Compound versions of the hashing algorithm I whipped together.\n uint hash( uvec2 v ) { return hash( v.x ^ hash(v.y) ); }\n uint hash( uvec3 v ) { return hash( v.x ^ hash(v.y) ^ hash(v.z) ); }\n uint hash( uvec4 v ) { return hash( v.x ^ hash(v.y) ^ hash(v.z) ^ hash(v.w) ); }\n\n // Construct a float with half-open range [0:1] using low 23 bits.\n // All zeroes yields 0.0, all ones yields the next smallest representable value below 1.0.\n float floatConstruct( uint m ) {\n const uint ieeeMantissa = 0x007FFFFFu; // binary32 mantissa bitmask\n const uint ieeeOne = 0x3F800000u; // 1.0 in IEEE binary32\n m &= ieeeMantissa; // Keep only mantissa bits (fractional part)\n m |= ieeeOne; // Add fractional part to 1.0\n float f = uintBitsToFloat( m ); // Range [1:2]\n return f - 1.0; // Range [0:1]\n }\n\n // Pseudo-random value in half-open range [0:1].\n float random( float x ) { return floatConstruct(hash(floatBitsToUint(x))); }\n float random( vec2 v ) { return floatConstruct(hash(floatBitsToUint(v))); }\n float random( vec3 v ) { return floatConstruct(hash(floatBitsToUint(v))); }\n float random( vec4 v ) { return 
floatConstruct(hash(floatBitsToUint(v))); }\n\n float rand() {\n float result = random(vec3(gl_FragCoord.xy, seed));\n seed += 1.0;\n return result;\n }\n\n const float F3 = 0.3333333;\n const float G3 = 0.1666667;\n\n float snoise(vec3 p) {\n vec3 s = floor(p + dot(p, vec3(F3)));\n vec3 x = p - s + dot(s, vec3(G3));\n vec3 e = step(vec3(0.0), x - x.yzx);\n vec3 i1 = e*(1.0 - e.zxy);\n vec3 i2 = 1.0 - e.zxy*(1.0 - e);\n vec3 x1 = x - i1 + G3;\n vec3 x2 = x - i2 + 2.0*G3;\n vec3 x3 = x - 1.0 + 3.0*G3;\n vec4 w, d;\n w.x = dot(x, x);\n w.y = dot(x1, x1);\n w.z = dot(x2, x2);\n w.w = dot(x3, x3);\n w = max(0.6 - w, 0.0);\n d.x = dot(random3(s), x);\n d.y = dot(random3(s + i1), x1);\n d.z = dot(random3(s + i2), x2);\n d.w = dot(random3(s + 1.0), x3);\n w *= w;\n w *= w;\n d *= w;\n return dot(d, vec4(52.0));\n }\n\n float snoiseFractal(vec3 m) {\n return 0.5333333* snoise(m)\n +0.2666667* snoise(2.0*m)\n +0.1333333* snoise(4.0*m)\n +0.0666667* snoise(8.0*m);\n }\n"+t.fragmentShader,t.fragmentShader=t.fragmentShader.replace("#include <transmission_pars_fragment>","\n #ifdef USE_TRANSMISSION\n // Transmission code is based on glTF-Sampler-Viewer\n // https://github.com/KhronosGroup/glTF-Sample-Viewer\n uniform float _transmission;\n uniform float thickness;\n uniform float attenuationDistance;\n uniform vec3 attenuationColor;\n #ifdef USE_TRANSMISSIONMAP\n uniform sampler2D transmissionMap;\n #endif\n #ifdef USE_THICKNESSMAP\n uniform sampler2D thicknessMap;\n #endif\n uniform vec2 transmissionSamplerSize;\n uniform sampler2D transmissionSamplerMap;\n uniform mat4 modelMatrix;\n uniform mat4 projectionMatrix;\n varying vec3 vWorldPosition;\n vec3 getVolumeTransmissionRay( const in vec3 n, const in vec3 v, const in float thickness, const in float ior, const in mat4 modelMatrix ) {\n // Direction of refracted light.\n vec3 refractionVector = refract( - v, normalize( n ), 1.0 / ior );\n // Compute rotation-independant scaling of the model matrix.\n vec3 modelScale;\n modelScale.x = length( vec3( modelMatrix[ 0 ].xyz ) );\n modelScale.y = length( vec3( modelMatrix[ 1 ].xyz ) );\n modelScale.z = length( vec3( modelMatrix[ 2 ].xyz ) );\n // The thickness is specified in local space.\n return normalize( refractionVector ) * thickness * modelScale;\n }\n float applyIorToRoughness( const in float roughness, const in float ior ) {\n // Scale roughness with IOR so that an IOR of 1.0 results in no microfacet refraction and\n // an IOR of 1.5 results in the default amount of microfacet refraction.\n return roughness * clamp( ior * 2.0 - 2.0, 0.0, 1.0 );\n }\n vec4 getTransmissionSample( const in vec2 fragCoord, const in float roughness, const in float ior ) {\n float framebufferLod = log2( transmissionSamplerSize.x ) * applyIorToRoughness( roughness, ior ); \n #ifdef USE_SAMPLER\n #ifdef texture2DLodEXT\n return texture2DLodEXT(transmissionSamplerMap, fragCoord.xy, framebufferLod);\n #else\n return texture2D(transmissionSamplerMap, fragCoord.xy, framebufferLod);\n #endif\n #else\n return texture2D(buffer, fragCoord.xy);\n #endif\n }\n vec3 applyVolumeAttenuation( const in vec3 radiance, const in float transmissionDistance, const in vec3 attenuationColor, const in float attenuationDistance ) {\n if ( isinf( attenuationDistance ) ) {\n // Attenuation distance is +∞, i.e. 
the transmitted color is not attenuated at all.\n return radiance;\n } else {\n // Compute light attenuation using Beer's law.\n vec3 attenuationCoefficient = -log( attenuationColor ) / attenuationDistance;\n vec3 transmittance = exp( - attenuationCoefficient * transmissionDistance ); // Beer's law\n return transmittance * radiance;\n }\n }\n vec4 getIBLVolumeRefraction( const in vec3 n, const in vec3 v, const in float roughness, const in vec3 diffuseColor,\n const in vec3 specularColor, const in float specularF90, const in vec3 position, const in mat4 modelMatrix,\n const in mat4 viewMatrix, const in mat4 projMatrix, const in float ior, const in float thickness,\n const in vec3 attenuationColor, const in float attenuationDistance ) {\n vec3 transmissionRay = getVolumeTransmissionRay( n, v, thickness, ior, modelMatrix );\n vec3 refractedRayExit = position + transmissionRay;\n // Project refracted vector on the framebuffer, while mapping to normalized device coordinates.\n vec4 ndcPos = projMatrix * viewMatrix * vec4( refractedRayExit, 1.0 );\n vec2 refractionCoords = ndcPos.xy / ndcPos.w;\n refractionCoords += 1.0;\n refractionCoords /= 2.0;\n // Sample framebuffer to get pixel the refracted ray hits.\n vec4 transmittedLight = getTransmissionSample( refractionCoords, roughness, ior );\n vec3 attenuatedColor = applyVolumeAttenuation( transmittedLight.rgb, length( transmissionRay ), attenuationColor, attenuationDistance );\n // Get the specular component.\n vec3 F = EnvironmentBRDF( n, v, specularColor, specularF90, roughness );\n return vec4( ( 1.0 - F ) * attenuatedColor * diffuseColor, transmittedLight.a );\n }\n #endif\n"),t.fragmentShader=t.fragmentShader.replace("#include <transmission_fragment>",` \n // Improve the refraction to use the world pos\n material.transmission = _transmission;\n material.transmissionAlpha = 1.0;\n material.thickness = thickness;\n material.attenuationDistance = attenuationDistance;\n material.attenuationColor = attenuationColor;\n #ifdef USE_TRANSMISSIONMAP\n material.transmission *= texture2D( transmissionMap, vUv ).r;\n #endif\n #ifdef USE_THICKNESSMAP\n material.thickness *= texture2D( thicknessMap, vUv ).g;\n #endif\n \n vec3 pos = vWorldPosition;\n vec3 v = normalize( cameraPosition - pos );\n vec3 n = inverseTransformDirection( normal, viewMatrix );\n vec3 transmission = vec3(0.0);\n float transmissionR, transmissionB, transmissionG;\n float randomCoords = rand();\n float thickness_smear = thickness * max(pow(roughnessFactor, 0.33), anisotropy);\n vec3 distortionNormal = vec3(0.0);\n vec3 temporalOffset = vec3(time, -time, -time) * temporalDistortion;\n if (distortion > 0.0) {\n distortionNormal = distortion * vec3(snoiseFractal(vec3((pos * distortionScale + temporalOffset))), snoiseFractal(vec3(pos.zxy * distortionScale - temporalOffset)), snoiseFractal(vec3(pos.yxz * distortionScale + temporalOffset)));\n }\n for (float i = 0.0; i < ${e}.0; i ++) {\n vec3 sampleNorm = normalize(n + roughnessFactor * roughnessFactor * 2.0 * normalize(vec3(rand() - 0.5, rand() - 0.5, rand() - 0.5)) * pow(rand(), 0.33) + distortionNormal);\n transmissionR = getIBLVolumeRefraction(\n sampleNorm, v, material.roughness, material.diffuseColor, material.specularColor, material.specularF90,\n pos, modelMatrix, viewMatrix, projectionMatrix, material.ior, material.thickness + thickness_smear * (i + randomCoords) / float(${e}),\n material.attenuationColor, material.attenuationDistance\n ).r;\n transmissionG = getIBLVolumeRefraction(\n sampleNorm, v, material.roughness, 
material.diffuseColor, material.specularColor, material.specularF90,\n pos, modelMatrix, viewMatrix, projectionMatrix, material.ior * (1.0 + chromaticAberration * (i + randomCoords) / float(${e})) , material.thickness + thickness_smear * (i + randomCoords) / float(${e}),\n material.attenuationColor, material.attenuationDistance\n ).g;\n transmissionB = getIBLVolumeRefraction(\n sampleNorm, v, material.roughness, material.diffuseColor, material.specularColor, material.specularF90,\n pos, modelMatrix, viewMatrix, projectionMatrix, material.ior * (1.0 + 2.0 * chromaticAberration * (i + randomCoords) / float(${e})), material.thickness + thickness_smear * (i + randomCoords) / float(${e}),\n material.attenuationColor, material.attenuationDistance\n ).b;\n transmission.r += transmissionR;\n transmission.g += transmissionG;\n transmission.b += transmissionB;\n }\n transmission /= ${e}.0;\n totalDiffuse = mix( totalDiffuse, transmission.rgb, material.transmission );\n`)},Object.keys(this.uniforms).forEach((e=>Object.defineProperty(this,e,{get:()=>this.uniforms[e].value,set:n=>this.uniforms[e].value=n})))}}const a=r({depth:null,opacity:1,attenuation:2.5,anglePower:12,spotPosition:new e.Vector3(0,0,0),lightColor:new e.Color("white"),cameraNear:0,cameraFar:1,resolution:new e.Vector2(0,0),transparent:!0,depthWrite:!1},"\n varying vec3 vNormal;\n varying vec3 vWorldPosition;\n varying float vViewZ;\n varying float vIntensity;\n uniform vec3 spotPosition;\n uniform float attenuation;\n\n void main() {\n // compute intensity\n vNormal = normalize( normalMatrix * normal );\n vec4 worldPosition\t= modelMatrix * vec4( position, 1.0 );\n vWorldPosition = worldPosition.xyz;\n vec4 viewPosition = viewMatrix * worldPosition;\n vViewZ = viewPosition.z;\n float intensity\t= distance(worldPosition.xyz, spotPosition) / attenuation;\n intensity\t= 1.0 - clamp(intensity, 0.0, 1.0);\n vIntensity = intensity;\n // set gl_Position\n gl_Position\t= projectionMatrix * viewPosition;\n\n }","\n #include <packing>\n\n varying vec3 vNormal;\n varying vec3 vWorldPosition;\n uniform vec3 lightColor;\n uniform vec3 spotPosition;\n uniform float attenuation;\n uniform float anglePower;\n uniform sampler2D depth;\n uniform vec2 resolution;\n uniform float cameraNear;\n uniform float cameraFar;\n varying float vViewZ;\n varying float vIntensity;\n uniform float opacity;\n\n float readDepth( sampler2D depthSampler, vec2 coord ) {\n float fragCoordZ = texture2D( depthSampler, coord ).x;\n float viewZ = perspectiveDepthToViewZ(fragCoordZ, cameraNear, cameraFar);\n return viewZ;\n }\n\n void main() {\n float d = 1.0;\n bool isSoft = resolution[0] > 0.0 && resolution[1] > 0.0;\n if (isSoft) {\n vec2 sUv = gl_FragCoord.xy / resolution;\n d = readDepth(depth, sUv);\n }\n float intensity = vIntensity;\n vec3 normal\t= vec3(vNormal.x, vNormal.y, abs(vNormal.z));\n float angleIntensity\t= pow( dot(normal, vec3(0.0, 0.0, 1.0)), anglePower );\n intensity\t*= angleIntensity;\n // fades when z is close to sampled depth, meaning the cone is intersecting existing geometry\n if (isSoft) {\n intensity\t*= smoothstep(0., 1., vViewZ - d);\n }\n gl_FragColor = vec4(lightColor, intensity * opacity);\n\n #include <tonemapping_fragment>\n #include <encodings_fragment>\n }");class s extends e.ShaderMaterial{constructor(n=new e.Vector2){super({uniforms:{inputBuffer:new e.Uniform(null),depthBuffer:new e.Uniform(null),resolution:new e.Uniform(new e.Vector2),texelSize:new e.Uniform(new e.Vector2),halfTexelSize:new e.Uniform(new e.Vector2),kernel:new 
e.Uniform(0),scale:new e.Uniform(1),cameraNear:new e.Uniform(0),cameraFar:new e.Uniform(1),minDepthThreshold:new e.Uniform(0),maxDepthThreshold:new e.Uniform(1),depthScale:new e.Uniform(0),depthToBlurRatioBias:new e.Uniform(.25)},fragmentShader:"#include <common>\n #include <dithering_pars_fragment> \n uniform sampler2D inputBuffer;\n uniform sampler2D depthBuffer;\n uniform float cameraNear;\n uniform float cameraFar;\n uniform float minDepthThreshold;\n uniform float maxDepthThreshold;\n uniform float depthScale;\n uniform float depthToBlurRatioBias;\n varying vec2 vUv;\n varying vec2 vUv0;\n varying vec2 vUv1;\n varying vec2 vUv2;\n varying vec2 vUv3;\n\n void main() {\n float depthFactor = 0.0;\n \n #ifdef USE_DEPTH\n vec4 depth = texture2D(depthBuffer, vUv);\n depthFactor = smoothstep(minDepthThreshold, maxDepthThreshold, 1.0-(depth.r * depth.a));\n depthFactor *= depthScale;\n depthFactor = max(0.0, min(1.0, depthFactor + 0.25));\n #endif\n \n vec4 sum = texture2D(inputBuffer, mix(vUv0, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv1, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv2, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv3, vUv, depthFactor));\n gl_FragColor = sum * 0.25 ;\n\n #include <dithering_fragment>\n #include <tonemapping_fragment>\n #include <encodings_fragment>\n }",vertexShader:"uniform vec2 texelSize;\n uniform vec2 halfTexelSize;\n uniform float kernel;\n uniform float scale;\n varying vec2 vUv;\n varying vec2 vUv0;\n varying vec2 vUv1;\n varying vec2 vUv2;\n varying vec2 vUv3;\n\n void main() {\n vec2 uv = position.xy * 0.5 + 0.5;\n vUv = uv;\n\n vec2 dUv = (texelSize * vec2(kernel) + halfTexelSize) * scale;\n vUv0 = vec2(uv.x - dUv.x, uv.y + dUv.y);\n vUv1 = vec2(uv.x + dUv.x, uv.y + dUv.y);\n vUv2 = vec2(uv.x + dUv.x, uv.y - dUv.y);\n vUv3 = vec2(uv.x - dUv.x, uv.y - dUv.y);\n\n gl_Position = vec4(position.xy, 1.0, 1.0);\n }",blending:e.NoBlending,depthWrite:!1,depthTest:!1}),this.toneMapped=!1,this.setTexelSize(n.x,n.y),this.kernel=new Float32Array([0,1,2,2,3])}setTexelSize(e,n){this.uniforms.texelSize.value.set(e,n),this.uniforms.halfTexelSize.value.set(e,n).multiplyScalar(.5)}setResolution(e){this.uniforms.resolution.value.copy(e)}}class l extends e.MeshStandardMaterial{constructor(e={}){super(),this._tDepth={value:null},this._distortionMap={value:null},this._tDiffuse={value:null},this._tDiffuseBlur={value:null},this._textureMatrix={value:null},this._hasBlur={value:!1},this._mirror={value:0},this._mixBlur={value:0},this._blurStrength={value:.5},this._minDepthThreshold={value:.9},this._maxDepthThreshold={value:1},this._depthScale={value:0},this._depthToBlurRatioBias={value:.25},this._distortion={value:1},this._mixContrast={value:1},this._tDepth={value:null},this._distortionMap={value:null},this._tDiffuse={value:null},this._tDiffuseBlur={value:null},this._textureMatrix={value:null},this._hasBlur={value:!1},this._mirror={value:0},this._mixBlur={value:0},this._blurStrength={value:.5},this._minDepthThreshold={value:.9},this._maxDepthThreshold={value:1},this._depthScale={value:0},this._depthToBlurRatioBias={value:.25},this._distortion={value:1},this._mixContrast={value:1},this.setValues(e)}onBeforeCompile(e){var 
n;null!=(n=e.defines)&&n.USE_UV||(e.defines.USE_UV=""),e.uniforms.hasBlur=this._hasBlur,e.uniforms.tDiffuse=this._tDiffuse,e.uniforms.tDepth=this._tDepth,e.uniforms.distortionMap=this._distortionMap,e.uniforms.tDiffuseBlur=this._tDiffuseBlur,e.uniforms.textureMatrix=this._textureMatrix,e.uniforms.mirror=this._mirror,e.uniforms.mixBlur=this._mixBlur,e.uniforms.mixStrength=this._blurStrength,e.uniforms.minDepthThreshold=this._minDepthThreshold,e.uniforms.maxDepthThreshold=this._maxDepthThreshold,e.uniforms.depthScale=this._depthScale,e.uniforms.depthToBlurRatioBias=this._depthToBlurRatioBias,e.uniforms.distortion=this._distortion,e.uniforms.mixContrast=this._mixContrast,e.vertexShader=`\n uniform mat4 textureMatrix;\n varying vec4 my_vUv;\n ${e.vertexShader}`,e.vertexShader=e.vertexShader.replace("#include <project_vertex>","#include <project_vertex>\n my_vUv = textureMatrix * vec4( position, 1.0 );\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );"),e.fragmentShader=`\n uniform sampler2D tDiffuse;\n uniform sampler2D tDiffuseBlur;\n uniform sampler2D tDepth;\n uniform sampler2D distortionMap;\n uniform float distortion;\n uniform float cameraNear;\n\t\t\t uniform float cameraFar;\n uniform bool hasBlur;\n uniform float mixBlur;\n uniform float mirror;\n uniform float mixStrength;\n uniform float minDepthThreshold;\n uniform float maxDepthThreshold;\n uniform float mixContrast;\n uniform float depthScale;\n uniform float depthToBlurRatioBias;\n varying vec4 my_vUv;\n ${e.fragmentShader}`,e.fragmentShader=e.fragmentShader.replace("#include <emissivemap_fragment>","#include <emissivemap_fragment>\n\n float distortionFactor = 0.0;\n #ifdef USE_DISTORTION\n distortionFactor = texture2D(distortionMap, vUv).r * distortion;\n #endif\n\n vec4 new_vUv = my_vUv;\n new_vUv.x += distortionFactor;\n new_vUv.y += distortionFactor;\n\n vec4 base = texture2DProj(tDiffuse, new_vUv);\n vec4 blur = texture2DProj(tDiffuseBlur, new_vUv);\n\n vec4 merge = base;\n\n #ifdef USE_NORMALMAP\n vec2 normal_uv = vec2(0.0);\n vec4 normalColor = texture2D(normalMap, vUv * normalScale);\n vec3 my_normal = normalize( vec3( normalColor.r * 2.0 - 1.0, normalColor.b, normalColor.g * 2.0 - 1.0 ) );\n vec3 coord = new_vUv.xyz / new_vUv.w;\n normal_uv = coord.xy + coord.z * my_normal.xz * 0.05;\n vec4 base_normal = texture2D(tDiffuse, normal_uv);\n vec4 blur_normal = texture2D(tDiffuseBlur, normal_uv);\n merge = base_normal;\n blur = blur_normal;\n #endif\n\n float depthFactor = 0.0001;\n float blurFactor = 0.0;\n\n #ifdef USE_DEPTH\n vec4 depth = texture2DProj(tDepth, new_vUv);\n depthFactor = smoothstep(minDepthThreshold, maxDepthThreshold, 1.0-(depth.r * depth.a));\n depthFactor *= depthScale;\n depthFactor = max(0.0001, min(1.0, depthFactor));\n\n #ifdef USE_BLUR\n blur = blur * min(1.0, depthFactor + depthToBlurRatioBias);\n merge = merge * min(1.0, depthFactor + 0.5);\n #else\n merge = merge * depthFactor;\n #endif\n\n #endif\n\n float reflectorRoughnessFactor = roughness;\n #ifdef USE_ROUGHNESSMAP\n vec4 reflectorTexelRoughness = texture2D( roughnessMap, vUv );\n reflectorRoughnessFactor *= reflectorTexelRoughness.g;\n #endif\n\n #ifdef USE_BLUR\n blurFactor = min(1.0, mixBlur * reflectorRoughnessFactor);\n merge = mix(merge, blur, blurFactor);\n #endif\n\n vec4 newMerge = vec4(0.0, 0.0, 0.0, 1.0);\n newMerge.r = (merge.r - 0.5) * mixContrast + 0.5;\n newMerge.g = (merge.g - 0.5) * mixContrast + 0.5;\n newMerge.b = (merge.b - 0.5) * mixContrast + 0.5;\n\n diffuseColor.rgb = diffuseColor.rgb * ((1.0 
- min(1.0, mirror)) + newMerge.rgb * mixStrength);\n ")}get tDiffuse(){return this._tDiffuse.value}set tDiffuse(e){this._tDiffuse.value=e}get tDepth(){return this._tDepth.value}set tDepth(e){this._tDepth.value=e}get distortionMap(){return this._distortionMap.value}set distortionMap(e){this._distortionMap.value=e}get tDiffuseBlur(){return this._tDiffuseBlur.value}set tDiffuseBlur(e){this._tDiffuseBlur.value=e}get textureMatrix(){return this._textureMatrix.value}set textureMatrix(e){this._textureMatrix.value=e}get hasBlur(){return this._hasBlur.value}set hasBlur(e){this._hasBlur.value=e}get mirror(){return this._mirror.value}set mirror(e){this._mirror.value=e}get mixBlur(){return this._mixBlur.value}set mixBlur(e){this._mixBlur.value=e}get mixStrength(){return this._blurStrength.value}set mixStrength(e){this._blurStrength.value=e}get minDepthThreshold(){return this._minDepthThreshold.value}set minDepthThreshold(e){this._minDepthThreshold.value=e}get maxDepthThreshold(){return this._maxDepthThreshold.value}set maxDepthThreshold(e){this._maxDepthThreshold.value=e}get depthScale(){return this._depthScale.value}set depthScale(e){this._depthScale.value=e}get depthToBlurRatioBias(){return this._depthToBlurRatioBias.value}set depthToBlurRatioBias(e){this._depthToBlurRatioBias.value=e}get distortion(){return this._distortion.value}set distortion(e){this._distortion.value=e}get mixContrast(){return this._mixContrast.value}set mixContrast(e){this._mixContrast.value=e}}exports.BlurPass=class{constructor({gl:n,resolution:t,width:r=500,height:o=500,minDepthThreshold:i=0,maxDepthThreshold:a=1,depthScale:l=0,depthToBlurRatioBias:u=.25}){this.renderToScreen=!1,this.renderTargetA=new e.WebGLRenderTarget(t,t,{minFilter:e.LinearFilter,magFilter:e.LinearFilter,stencilBuffer:!1,depthBuffer:!1,encoding:n.outputEncoding}),this.renderTargetB=this.renderTargetA.clone(),this.convolutionMaterial=new s,this.convolutionMaterial.setTexelSize(1/r,1/o),this.convolutionMaterial.setResolution(new e.Vector2(r,o)),this.scene=new e.Scene,this.camera=new e.Camera,this.convolutionMaterial.uniforms.minDepthThreshold.value=i,this.convolutionMaterial.uniforms.maxDepthThreshold.value=a,this.convolutionMaterial.uniforms.depthScale.value=l,this.convolutionMaterial.uniforms.depthToBlurRatioBias.value=u,this.convolutionMaterial.defines.USE_DEPTH=l>0;const m=new Float32Array([-1,-1,0,3,-1,0,-1,3,0]),c=new Float32Array([0,0,2,0,0,2]),f=new e.BufferGeometry;f.setAttribute("position",new e.BufferAttribute(m,3)),f.setAttribute("uv",new e.BufferAttribute(c,2)),this.screen=new e.Mesh(f,this.convolutionMaterial),this.screen.frustumCulled=!1,this.scene.add(this.screen)}render(e,n,t){const r=this.scene,o=this.camera,i=this.renderTargetA,a=this.renderTargetB,s=this.convolutionMaterial,l=s.uniforms;l.depthBuffer.value=n.depthTexture;const u=s.kernel;let m,c,f,h=n;for(c=0,f=u.length-1;c<f;++c)m=0==(1&c)?i:a,l.kernel.value=u[c],l.inputBuffer.value=h.texture,e.setRenderTarget(m),e.render(r,o),h=m;l.kernel.value=u[c],l.inputBuffer.value=h.texture,e.setRenderTarget(this.renderToScreen?null:t),e.render(r,o)}},exports.ConvolutionMaterial=s,exports.MeshDiscardMaterial=o,exports.MeshReflectorMaterial=l,exports.MeshTransmissionMaterial=i,exports.SpotLightMaterial=a,exports.pcss=({focus:e=0,size:n=25,samples:r=10}={})=>{const o=t.ShaderChunk.shadowmap_pars_fragment;return t.ShaderChunk.shadowmap_pars_fragment=t.ShaderChunk.shadowmap_pars_fragment.replace("#ifdef USE_SHADOWMAP",`#ifdef USE_SHADOWMAP\n\n #define PENUMBRA_FILTER_SIZE float(${n})\n #define 
RGB_NOISE_FUNCTION(uv) (randRGB(uv))\n vec3 randRGB(vec2 uv) {\n return vec3(\n fract(sin(dot(uv, vec2(12.75613, 38.12123))) * 13234.76575),\n fract(sin(dot(uv, vec2(19.45531, 58.46547))) * 43678.23431),\n fract(sin(dot(uv, vec2(23.67817, 78.23121))) * 93567.23423)\n );\n }\n \n vec3 lowPassRandRGB(vec2 uv) {\n // 3x3 convolution (average)\n // can be implemented as separable with an extra buffer for a total of 6 samples instead of 9\n vec3 result = vec3(0);\n result += RGB_NOISE_FUNCTION(uv + vec2(-1.0, -1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(-1.0, 0.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(-1.0, +1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2( 0.0, -1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2( 0.0, 0.0));\n result += RGB_NOISE_FUNCTION(uv + vec2( 0.0, +1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(+1.0, -1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(+1.0, 0.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(+1.0, +1.0));\n result *= 0.111111111; // 1.0 / 9.0\n return result;\n }\n vec3 highPassRandRGB(vec2 uv) {\n // by subtracting the low-pass signal from the original signal, we're being left with the high-pass signal\n // hp(x) = x - lp(x)\n return RGB_NOISE_FUNCTION(uv) - lowPassRandRGB(uv) + 0.5;\n }\n \n \n vec2 vogelDiskSample(int sampleIndex, int sampleCount, float angle) {\n const float goldenAngle = 2.399963f; // radians\n float r = sqrt(float(sampleIndex) + 0.5f) / sqrt(float(sampleCount));\n float theta = float(sampleIndex) * goldenAngle + angle;\n float sine = sin(theta);\n float cosine = cos(theta);\n return vec2(cosine, sine) * r;\n }\n float penumbraSize( const in float zReceiver, const in float zBlocker ) { // Parallel plane estimation\n return (zReceiver - zBlocker) / zBlocker;\n }\n float findBlocker(sampler2D shadowMap, vec2 uv, float compare, float angle) {\n float texelSize = 1.0 / float(textureSize(shadowMap, 0).x);\n float blockerDepthSum = float(${e});\n float blockers = 0.0;\n \n int j = 0;\n vec2 offset = vec2(0.);\n float depth = 0.;\n \n #pragma unroll_loop_start\n for(int i = 0; i < ${r}; i ++) {\n offset = (vogelDiskSample(j, ${r}, angle) * texelSize) * 2.0 * PENUMBRA_FILTER_SIZE;\n depth = unpackRGBAToDepth( texture2D( shadowMap, uv + offset));\n if (depth < compare) {\n blockerDepthSum += depth;\n blockers++;\n }\n j++;\n }\n #pragma unroll_loop_end\n \n if (blockers > 0.0) {\n return blockerDepthSum / blockers;\n }\n return -1.0;\n }\n \n float vogelFilter(sampler2D shadowMap, vec2 uv, float zReceiver, float filterRadius, float angle) {\n float texelSize = 1.0 / float(textureSize(shadowMap, 0).x);\n float shadow = 0.0f;\n int j = 0;\n vec2 vogelSample = vec2(0.0);\n vec2 offset = vec2(0.0);\n #pragma unroll_loop_start\n for (int i = 0; i < ${r}; i++) {\n vogelSample = vogelDiskSample(j, ${r}, angle) * texelSize;\n offset = vogelSample * (1.0 + filterRadius * float(${n}));\n shadow += step( zReceiver, unpackRGBAToDepth( texture2D( shadowMap, uv + offset ) ) );\n j++;\n }\n #pragma unroll_loop_end\n return shadow * 1.0 / ${r}.0;\n }\n \n float PCSS (sampler2D shadowMap, vec4 coords) {\n vec2 uv = coords.xy;\n float zReceiver = coords.z; // Assumed to be eye-space z in this code\n float angle = highPassRandRGB(gl_FragCoord.xy).r * PI2;\n float avgBlockerDepth = findBlocker(shadowMap, uv, zReceiver, angle);\n if (avgBlockerDepth == -1.0) {\n return 1.0;\n }\n float penumbraRatio = penumbraSize(zReceiver, avgBlockerDepth);\n return vogelFilter(shadowMap, uv, zReceiver, 1.25 * penumbraRatio, angle);\n }`).replace("#if defined( 
SHADOWMAP_TYPE_PCF )","\nreturn PCSS(shadowMap, shadowCoord);\n#if defined( SHADOWMAP_TYPE_PCF )"),(e,n,r)=>{t.ShaderChunk.shadowmap_pars_fragment=o,function(e,n,t){n.traverse((n=>{n.material&&(e.properties.remove(n.material),n.material.dispose())})),e.info.programs.length=0,e.compile(n,t)}(e,n,r)}},exports.shaderMaterial=r; | ||
"use strict";Object.defineProperty(exports,"__esModule",{value:!0});var e=require("three");function t(e){if(e&&e.__esModule)return e;var t=Object.create(null);return e&&Object.keys(e).forEach((function(n){if("default"!==n){var r=Object.getOwnPropertyDescriptor(e,n);Object.defineProperty(t,n,r.get?r:{enumerable:!0,get:function(){return e[n]}})}})),t.default=e,Object.freeze(t)}var n=t(e);function r(e,t,r,i){const o=class extends n.ShaderMaterial{constructor(o={}){const a=Object.entries(e);super({uniforms:a.reduce(((e,[t,r])=>({...e,...n.UniformsUtils.clone({[t]:{value:r}})})),{}),vertexShader:t,fragmentShader:r}),this.key="",a.forEach((([e])=>Object.defineProperty(this,e,{get:()=>this.uniforms[e].value,set:t=>this.uniforms[e].value=t}))),Object.assign(this,o),i&&i(this)}};return o.key=n.MathUtils.generateUUID(),o}const i=r({},"void main() { }","void main() { gl_FragColor = vec4(0.0, 0.0, 0.0, 0.0); discard; }");const o=r({color:new n.Color(0),blend:2,alphaTest:.75,opacity:0,map:null},"varying vec2 vUv;\n void main() {\n gl_Position = projectionMatrix * viewMatrix * modelMatrix * vec4(position, 1.);\n vUv = uv;\n }","varying vec2 vUv;\n uniform sampler2D map;\n uniform vec3 color;\n uniform float opacity;\n uniform float alphaTest;\n uniform float blend;\n void main() {\n vec4 sampledDiffuseColor = texture2D(map, vUv);\n gl_FragColor = vec4(color * sampledDiffuseColor.r * blend, max(0.0, (1.0 - (sampledDiffuseColor.r + sampledDiffuseColor.g + sampledDiffuseColor.b) / alphaTest)) * opacity);\n #include <tonemapping_fragment>\n #include <encodings_fragment>\n }");class a extends n.MeshPhysicalMaterial{constructor({samples:e=6,transmissionSampler:t=!1,chromaticAberration:r=.05,transmission:i=0,_transmission:o=1,transmissionMap:a=null,roughness:s=0,thickness:l=0,thicknessMap:u=null,attenuationDistance:h=1/0,attenuationColor:c=new n.Color("white"),anisotropy:m=.1,time:f=0,distortion:v=0,distortionScale:d=.5,temporalDistortion:p=0,buffer:g=null}){super(),this.uniforms={chromaticAberration:{value:r},transmission:{value:i},_transmission:{value:o},transmissionMap:{value:a},roughness:{value:s},thickness:{value:l},thicknessMap:{value:u},attenuationDistance:{value:h},attenuationColor:{value:c},anisotropy:{value:m},time:{value:f},distortion:{value:v},distortionScale:{value:d},temporalDistortion:{value:p},buffer:{value:g}},this.onBeforeCompile=n=>{n.uniforms={...n.uniforms,...this.uniforms},t?n.defines.USE_SAMPLER="":n.defines.USE_TRANSMISSION="",n.fragmentShader="\n uniform float chromaticAberration; \n uniform float anisotropy; \n uniform float time;\n uniform float distortion;\n uniform float distortionScale;\n uniform float temporalDistortion;\n uniform sampler2D buffer;\n\n vec3 random3(vec3 c) {\n float j = 4096.0*sin(dot(c,vec3(17.0, 59.4, 15.0)));\n vec3 r;\n r.z = fract(512.0*j);\n j *= .125;\n r.x = fract(512.0*j);\n j *= .125;\n r.y = fract(512.0*j);\n return r-0.5;\n }\n\n float seed = 0.0;\n uint hash( uint x ) {\n x += ( x << 10u );\n x ^= ( x >> 6u );\n x += ( x << 3u );\n x ^= ( x >> 11u );\n x += ( x << 15u );\n return x;\n }\n\n // Compound versions of the hashing algorithm I whipped together.\n uint hash( uvec2 v ) { return hash( v.x ^ hash(v.y) ); }\n uint hash( uvec3 v ) { return hash( v.x ^ hash(v.y) ^ hash(v.z) ); }\n uint hash( uvec4 v ) { return hash( v.x ^ hash(v.y) ^ hash(v.z) ^ hash(v.w) ); }\n\n // Construct a float with half-open range [0:1] using low 23 bits.\n // All zeroes yields 0.0, all ones yields the next smallest representable value below 1.0.\n float floatConstruct( 
uint m ) {\n const uint ieeeMantissa = 0x007FFFFFu; // binary32 mantissa bitmask\n const uint ieeeOne = 0x3F800000u; // 1.0 in IEEE binary32\n m &= ieeeMantissa; // Keep only mantissa bits (fractional part)\n m |= ieeeOne; // Add fractional part to 1.0\n float f = uintBitsToFloat( m ); // Range [1:2]\n return f - 1.0; // Range [0:1]\n }\n\n // Pseudo-random value in half-open range [0:1].\n float random( float x ) { return floatConstruct(hash(floatBitsToUint(x))); }\n float random( vec2 v ) { return floatConstruct(hash(floatBitsToUint(v))); }\n float random( vec3 v ) { return floatConstruct(hash(floatBitsToUint(v))); }\n float random( vec4 v ) { return floatConstruct(hash(floatBitsToUint(v))); }\n\n float rand() {\n float result = random(vec3(gl_FragCoord.xy, seed));\n seed += 1.0;\n return result;\n }\n\n const float F3 = 0.3333333;\n const float G3 = 0.1666667;\n\n float snoise(vec3 p) {\n vec3 s = floor(p + dot(p, vec3(F3)));\n vec3 x = p - s + dot(s, vec3(G3));\n vec3 e = step(vec3(0.0), x - x.yzx);\n vec3 i1 = e*(1.0 - e.zxy);\n vec3 i2 = 1.0 - e.zxy*(1.0 - e);\n vec3 x1 = x - i1 + G3;\n vec3 x2 = x - i2 + 2.0*G3;\n vec3 x3 = x - 1.0 + 3.0*G3;\n vec4 w, d;\n w.x = dot(x, x);\n w.y = dot(x1, x1);\n w.z = dot(x2, x2);\n w.w = dot(x3, x3);\n w = max(0.6 - w, 0.0);\n d.x = dot(random3(s), x);\n d.y = dot(random3(s + i1), x1);\n d.z = dot(random3(s + i2), x2);\n d.w = dot(random3(s + 1.0), x3);\n w *= w;\n w *= w;\n d *= w;\n return dot(d, vec4(52.0));\n }\n\n float snoiseFractal(vec3 m) {\n return 0.5333333* snoise(m)\n +0.2666667* snoise(2.0*m)\n +0.1333333* snoise(4.0*m)\n +0.0666667* snoise(8.0*m);\n }\n"+n.fragmentShader,n.fragmentShader=n.fragmentShader.replace("#include <transmission_pars_fragment>","\n #ifdef USE_TRANSMISSION\n // Transmission code is based on glTF-Sampler-Viewer\n // https://github.com/KhronosGroup/glTF-Sample-Viewer\n uniform float _transmission;\n uniform float thickness;\n uniform float attenuationDistance;\n uniform vec3 attenuationColor;\n #ifdef USE_TRANSMISSIONMAP\n uniform sampler2D transmissionMap;\n #endif\n #ifdef USE_THICKNESSMAP\n uniform sampler2D thicknessMap;\n #endif\n uniform vec2 transmissionSamplerSize;\n uniform sampler2D transmissionSamplerMap;\n uniform mat4 modelMatrix;\n uniform mat4 projectionMatrix;\n varying vec3 vWorldPosition;\n vec3 getVolumeTransmissionRay( const in vec3 n, const in vec3 v, const in float thickness, const in float ior, const in mat4 modelMatrix ) {\n // Direction of refracted light.\n vec3 refractionVector = refract( - v, normalize( n ), 1.0 / ior );\n // Compute rotation-independant scaling of the model matrix.\n vec3 modelScale;\n modelScale.x = length( vec3( modelMatrix[ 0 ].xyz ) );\n modelScale.y = length( vec3( modelMatrix[ 1 ].xyz ) );\n modelScale.z = length( vec3( modelMatrix[ 2 ].xyz ) );\n // The thickness is specified in local space.\n return normalize( refractionVector ) * thickness * modelScale;\n }\n float applyIorToRoughness( const in float roughness, const in float ior ) {\n // Scale roughness with IOR so that an IOR of 1.0 results in no microfacet refraction and\n // an IOR of 1.5 results in the default amount of microfacet refraction.\n return roughness * clamp( ior * 2.0 - 2.0, 0.0, 1.0 );\n }\n vec4 getTransmissionSample( const in vec2 fragCoord, const in float roughness, const in float ior ) {\n float framebufferLod = log2( transmissionSamplerSize.x ) * applyIorToRoughness( roughness, ior ); \n #ifdef USE_SAMPLER\n #ifdef texture2DLodEXT\n return texture2DLodEXT(transmissionSamplerMap, 
fragCoord.xy, framebufferLod);\n #else\n return texture2D(transmissionSamplerMap, fragCoord.xy, framebufferLod);\n #endif\n #else\n return texture2D(buffer, fragCoord.xy);\n #endif\n }\n vec3 applyVolumeAttenuation( const in vec3 radiance, const in float transmissionDistance, const in vec3 attenuationColor, const in float attenuationDistance ) {\n if ( isinf( attenuationDistance ) ) {\n // Attenuation distance is +∞, i.e. the transmitted color is not attenuated at all.\n return radiance;\n } else {\n // Compute light attenuation using Beer's law.\n vec3 attenuationCoefficient = -log( attenuationColor ) / attenuationDistance;\n vec3 transmittance = exp( - attenuationCoefficient * transmissionDistance ); // Beer's law\n return transmittance * radiance;\n }\n }\n vec4 getIBLVolumeRefraction( const in vec3 n, const in vec3 v, const in float roughness, const in vec3 diffuseColor,\n const in vec3 specularColor, const in float specularF90, const in vec3 position, const in mat4 modelMatrix,\n const in mat4 viewMatrix, const in mat4 projMatrix, const in float ior, const in float thickness,\n const in vec3 attenuationColor, const in float attenuationDistance ) {\n vec3 transmissionRay = getVolumeTransmissionRay( n, v, thickness, ior, modelMatrix );\n vec3 refractedRayExit = position + transmissionRay;\n // Project refracted vector on the framebuffer, while mapping to normalized device coordinates.\n vec4 ndcPos = projMatrix * viewMatrix * vec4( refractedRayExit, 1.0 );\n vec2 refractionCoords = ndcPos.xy / ndcPos.w;\n refractionCoords += 1.0;\n refractionCoords /= 2.0;\n // Sample framebuffer to get pixel the refracted ray hits.\n vec4 transmittedLight = getTransmissionSample( refractionCoords, roughness, ior );\n vec3 attenuatedColor = applyVolumeAttenuation( transmittedLight.rgb, length( transmissionRay ), attenuationColor, attenuationDistance );\n // Get the specular component.\n vec3 F = EnvironmentBRDF( n, v, specularColor, specularF90, roughness );\n return vec4( ( 1.0 - F ) * attenuatedColor * diffuseColor, transmittedLight.a );\n }\n #endif\n"),n.fragmentShader=n.fragmentShader.replace("#include <transmission_fragment>",` \n // Improve the refraction to use the world pos\n material.transmission = _transmission;\n material.transmissionAlpha = 1.0;\n material.thickness = thickness;\n material.attenuationDistance = attenuationDistance;\n material.attenuationColor = attenuationColor;\n #ifdef USE_TRANSMISSIONMAP\n material.transmission *= texture2D( transmissionMap, vUv ).r;\n #endif\n #ifdef USE_THICKNESSMAP\n material.thickness *= texture2D( thicknessMap, vUv ).g;\n #endif\n \n vec3 pos = vWorldPosition;\n vec3 v = normalize( cameraPosition - pos );\n vec3 n = inverseTransformDirection( normal, viewMatrix );\n vec3 transmission = vec3(0.0);\n float transmissionR, transmissionB, transmissionG;\n float randomCoords = rand();\n float thickness_smear = thickness * max(pow(roughnessFactor, 0.33), anisotropy);\n vec3 distortionNormal = vec3(0.0);\n vec3 temporalOffset = vec3(time, -time, -time) * temporalDistortion;\n if (distortion > 0.0) {\n distortionNormal = distortion * vec3(snoiseFractal(vec3((pos * distortionScale + temporalOffset))), snoiseFractal(vec3(pos.zxy * distortionScale - temporalOffset)), snoiseFractal(vec3(pos.yxz * distortionScale + temporalOffset)));\n }\n for (float i = 0.0; i < ${e}.0; i ++) {\n vec3 sampleNorm = normalize(n + roughnessFactor * roughnessFactor * 2.0 * normalize(vec3(rand() - 0.5, rand() - 0.5, rand() - 0.5)) * pow(rand(), 0.33) + distortionNormal);\n 
transmissionR = getIBLVolumeRefraction(\n sampleNorm, v, material.roughness, material.diffuseColor, material.specularColor, material.specularF90,\n pos, modelMatrix, viewMatrix, projectionMatrix, material.ior, material.thickness + thickness_smear * (i + randomCoords) / float(${e}),\n material.attenuationColor, material.attenuationDistance\n ).r;\n transmissionG = getIBLVolumeRefraction(\n sampleNorm, v, material.roughness, material.diffuseColor, material.specularColor, material.specularF90,\n pos, modelMatrix, viewMatrix, projectionMatrix, material.ior * (1.0 + chromaticAberration * (i + randomCoords) / float(${e})) , material.thickness + thickness_smear * (i + randomCoords) / float(${e}),\n material.attenuationColor, material.attenuationDistance\n ).g;\n transmissionB = getIBLVolumeRefraction(\n sampleNorm, v, material.roughness, material.diffuseColor, material.specularColor, material.specularF90,\n pos, modelMatrix, viewMatrix, projectionMatrix, material.ior * (1.0 + 2.0 * chromaticAberration * (i + randomCoords) / float(${e})), material.thickness + thickness_smear * (i + randomCoords) / float(${e}),\n material.attenuationColor, material.attenuationDistance\n ).b;\n transmission.r += transmissionR;\n transmission.g += transmissionG;\n transmission.b += transmissionB;\n }\n transmission /= ${e}.0;\n totalDiffuse = mix( totalDiffuse, transmission.rgb, material.transmission );\n`)},Object.keys(this.uniforms).forEach((e=>Object.defineProperty(this,e,{get:()=>this.uniforms[e].value,set:t=>this.uniforms[e].value=t})))}}const s=r({depth:null,opacity:1,attenuation:2.5,anglePower:12,spotPosition:new e.Vector3(0,0,0),lightColor:new e.Color("white"),cameraNear:0,cameraFar:1,resolution:new e.Vector2(0,0),transparent:!0,depthWrite:!1},"\n varying vec3 vNormal;\n varying vec3 vWorldPosition;\n varying float vViewZ;\n varying float vIntensity;\n uniform vec3 spotPosition;\n uniform float attenuation;\n\n void main() {\n // compute intensity\n vNormal = normalize( normalMatrix * normal );\n vec4 worldPosition\t= modelMatrix * vec4( position, 1.0 );\n vWorldPosition = worldPosition.xyz;\n vec4 viewPosition = viewMatrix * worldPosition;\n vViewZ = viewPosition.z;\n float intensity\t= distance(worldPosition.xyz, spotPosition) / attenuation;\n intensity\t= 1.0 - clamp(intensity, 0.0, 1.0);\n vIntensity = intensity;\n // set gl_Position\n gl_Position\t= projectionMatrix * viewPosition;\n\n }","\n #include <packing>\n\n varying vec3 vNormal;\n varying vec3 vWorldPosition;\n uniform vec3 lightColor;\n uniform vec3 spotPosition;\n uniform float attenuation;\n uniform float anglePower;\n uniform sampler2D depth;\n uniform vec2 resolution;\n uniform float cameraNear;\n uniform float cameraFar;\n varying float vViewZ;\n varying float vIntensity;\n uniform float opacity;\n\n float readDepth( sampler2D depthSampler, vec2 coord ) {\n float fragCoordZ = texture2D( depthSampler, coord ).x;\n float viewZ = perspectiveDepthToViewZ(fragCoordZ, cameraNear, cameraFar);\n return viewZ;\n }\n\n void main() {\n float d = 1.0;\n bool isSoft = resolution[0] > 0.0 && resolution[1] > 0.0;\n if (isSoft) {\n vec2 sUv = gl_FragCoord.xy / resolution;\n d = readDepth(depth, sUv);\n }\n float intensity = vIntensity;\n vec3 normal\t= vec3(vNormal.x, vNormal.y, abs(vNormal.z));\n float angleIntensity\t= pow( dot(normal, vec3(0.0, 0.0, 1.0)), anglePower );\n intensity\t*= angleIntensity;\n // fades when z is close to sampled depth, meaning the cone is intersecting existing geometry\n if (isSoft) {\n intensity\t*= smoothstep(0., 1., vViewZ 
- d);\n }\n gl_FragColor = vec4(lightColor, intensity * opacity);\n\n #include <tonemapping_fragment>\n #include <encodings_fragment>\n }");class l extends e.ShaderMaterial{constructor(t=new e.Vector2){super({uniforms:{inputBuffer:new e.Uniform(null),depthBuffer:new e.Uniform(null),resolution:new e.Uniform(new e.Vector2),texelSize:new e.Uniform(new e.Vector2),halfTexelSize:new e.Uniform(new e.Vector2),kernel:new e.Uniform(0),scale:new e.Uniform(1),cameraNear:new e.Uniform(0),cameraFar:new e.Uniform(1),minDepthThreshold:new e.Uniform(0),maxDepthThreshold:new e.Uniform(1),depthScale:new e.Uniform(0),depthToBlurRatioBias:new e.Uniform(.25)},fragmentShader:"#include <common>\n #include <dithering_pars_fragment> \n uniform sampler2D inputBuffer;\n uniform sampler2D depthBuffer;\n uniform float cameraNear;\n uniform float cameraFar;\n uniform float minDepthThreshold;\n uniform float maxDepthThreshold;\n uniform float depthScale;\n uniform float depthToBlurRatioBias;\n varying vec2 vUv;\n varying vec2 vUv0;\n varying vec2 vUv1;\n varying vec2 vUv2;\n varying vec2 vUv3;\n\n void main() {\n float depthFactor = 0.0;\n \n #ifdef USE_DEPTH\n vec4 depth = texture2D(depthBuffer, vUv);\n depthFactor = smoothstep(minDepthThreshold, maxDepthThreshold, 1.0-(depth.r * depth.a));\n depthFactor *= depthScale;\n depthFactor = max(0.0, min(1.0, depthFactor + 0.25));\n #endif\n \n vec4 sum = texture2D(inputBuffer, mix(vUv0, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv1, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv2, vUv, depthFactor));\n sum += texture2D(inputBuffer, mix(vUv3, vUv, depthFactor));\n gl_FragColor = sum * 0.25 ;\n\n #include <dithering_fragment>\n #include <tonemapping_fragment>\n #include <encodings_fragment>\n }",vertexShader:"uniform vec2 texelSize;\n uniform vec2 halfTexelSize;\n uniform float kernel;\n uniform float scale;\n varying vec2 vUv;\n varying vec2 vUv0;\n varying vec2 vUv1;\n varying vec2 vUv2;\n varying vec2 vUv3;\n\n void main() {\n vec2 uv = position.xy * 0.5 + 0.5;\n vUv = uv;\n\n vec2 dUv = (texelSize * vec2(kernel) + halfTexelSize) * scale;\n vUv0 = vec2(uv.x - dUv.x, uv.y + dUv.y);\n vUv1 = vec2(uv.x + dUv.x, uv.y + dUv.y);\n vUv2 = vec2(uv.x + dUv.x, uv.y - dUv.y);\n vUv3 = vec2(uv.x - dUv.x, uv.y - dUv.y);\n\n gl_Position = vec4(position.xy, 1.0, 1.0);\n }",blending:e.NoBlending,depthWrite:!1,depthTest:!1}),this.toneMapped=!1,this.setTexelSize(t.x,t.y),this.kernel=new Float32Array([0,1,2,2,3])}setTexelSize(e,t){this.uniforms.texelSize.value.set(e,t),this.uniforms.halfTexelSize.value.set(e,t).multiplyScalar(.5)}setResolution(e){this.uniforms.resolution.value.copy(e)}}class u extends 
e.MeshStandardMaterial{constructor(e={}){super(),this._tDepth={value:null},this._distortionMap={value:null},this._tDiffuse={value:null},this._tDiffuseBlur={value:null},this._textureMatrix={value:null},this._hasBlur={value:!1},this._mirror={value:0},this._mixBlur={value:0},this._blurStrength={value:.5},this._minDepthThreshold={value:.9},this._maxDepthThreshold={value:1},this._depthScale={value:0},this._depthToBlurRatioBias={value:.25},this._distortion={value:1},this._mixContrast={value:1},this._tDepth={value:null},this._distortionMap={value:null},this._tDiffuse={value:null},this._tDiffuseBlur={value:null},this._textureMatrix={value:null},this._hasBlur={value:!1},this._mirror={value:0},this._mixBlur={value:0},this._blurStrength={value:.5},this._minDepthThreshold={value:.9},this._maxDepthThreshold={value:1},this._depthScale={value:0},this._depthToBlurRatioBias={value:.25},this._distortion={value:1},this._mixContrast={value:1},this.setValues(e)}onBeforeCompile(e){var t;null!=(t=e.defines)&&t.USE_UV||(e.defines.USE_UV=""),e.uniforms.hasBlur=this._hasBlur,e.uniforms.tDiffuse=this._tDiffuse,e.uniforms.tDepth=this._tDepth,e.uniforms.distortionMap=this._distortionMap,e.uniforms.tDiffuseBlur=this._tDiffuseBlur,e.uniforms.textureMatrix=this._textureMatrix,e.uniforms.mirror=this._mirror,e.uniforms.mixBlur=this._mixBlur,e.uniforms.mixStrength=this._blurStrength,e.uniforms.minDepthThreshold=this._minDepthThreshold,e.uniforms.maxDepthThreshold=this._maxDepthThreshold,e.uniforms.depthScale=this._depthScale,e.uniforms.depthToBlurRatioBias=this._depthToBlurRatioBias,e.uniforms.distortion=this._distortion,e.uniforms.mixContrast=this._mixContrast,e.vertexShader=`\n uniform mat4 textureMatrix;\n varying vec4 my_vUv;\n ${e.vertexShader}`,e.vertexShader=e.vertexShader.replace("#include <project_vertex>","#include <project_vertex>\n my_vUv = textureMatrix * vec4( position, 1.0 );\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );"),e.fragmentShader=`\n uniform sampler2D tDiffuse;\n uniform sampler2D tDiffuseBlur;\n uniform sampler2D tDepth;\n uniform sampler2D distortionMap;\n uniform float distortion;\n uniform float cameraNear;\n\t\t\t uniform float cameraFar;\n uniform bool hasBlur;\n uniform float mixBlur;\n uniform float mirror;\n uniform float mixStrength;\n uniform float minDepthThreshold;\n uniform float maxDepthThreshold;\n uniform float mixContrast;\n uniform float depthScale;\n uniform float depthToBlurRatioBias;\n varying vec4 my_vUv;\n ${e.fragmentShader}`,e.fragmentShader=e.fragmentShader.replace("#include <emissivemap_fragment>","#include <emissivemap_fragment>\n\n float distortionFactor = 0.0;\n #ifdef USE_DISTORTION\n distortionFactor = texture2D(distortionMap, vUv).r * distortion;\n #endif\n\n vec4 new_vUv = my_vUv;\n new_vUv.x += distortionFactor;\n new_vUv.y += distortionFactor;\n\n vec4 base = texture2DProj(tDiffuse, new_vUv);\n vec4 blur = texture2DProj(tDiffuseBlur, new_vUv);\n\n vec4 merge = base;\n\n #ifdef USE_NORMALMAP\n vec2 normal_uv = vec2(0.0);\n vec4 normalColor = texture2D(normalMap, vUv * normalScale);\n vec3 my_normal = normalize( vec3( normalColor.r * 2.0 - 1.0, normalColor.b, normalColor.g * 2.0 - 1.0 ) );\n vec3 coord = new_vUv.xyz / new_vUv.w;\n normal_uv = coord.xy + coord.z * my_normal.xz * 0.05;\n vec4 base_normal = texture2D(tDiffuse, normal_uv);\n vec4 blur_normal = texture2D(tDiffuseBlur, normal_uv);\n merge = base_normal;\n blur = blur_normal;\n #endif\n\n float depthFactor = 0.0001;\n float blurFactor = 0.0;\n\n #ifdef USE_DEPTH\n vec4 depth 
= texture2DProj(tDepth, new_vUv);\n depthFactor = smoothstep(minDepthThreshold, maxDepthThreshold, 1.0-(depth.r * depth.a));\n depthFactor *= depthScale;\n depthFactor = max(0.0001, min(1.0, depthFactor));\n\n #ifdef USE_BLUR\n blur = blur * min(1.0, depthFactor + depthToBlurRatioBias);\n merge = merge * min(1.0, depthFactor + 0.5);\n #else\n merge = merge * depthFactor;\n #endif\n\n #endif\n\n float reflectorRoughnessFactor = roughness;\n #ifdef USE_ROUGHNESSMAP\n vec4 reflectorTexelRoughness = texture2D( roughnessMap, vUv );\n reflectorRoughnessFactor *= reflectorTexelRoughness.g;\n #endif\n\n #ifdef USE_BLUR\n blurFactor = min(1.0, mixBlur * reflectorRoughnessFactor);\n merge = mix(merge, blur, blurFactor);\n #endif\n\n vec4 newMerge = vec4(0.0, 0.0, 0.0, 1.0);\n newMerge.r = (merge.r - 0.5) * mixContrast + 0.5;\n newMerge.g = (merge.g - 0.5) * mixContrast + 0.5;\n newMerge.b = (merge.b - 0.5) * mixContrast + 0.5;\n\n diffuseColor.rgb = diffuseColor.rgb * ((1.0 - min(1.0, mirror)) + newMerge.rgb * mixStrength);\n ")}get tDiffuse(){return this._tDiffuse.value}set tDiffuse(e){this._tDiffuse.value=e}get tDepth(){return this._tDepth.value}set tDepth(e){this._tDepth.value=e}get distortionMap(){return this._distortionMap.value}set distortionMap(e){this._distortionMap.value=e}get tDiffuseBlur(){return this._tDiffuseBlur.value}set tDiffuseBlur(e){this._tDiffuseBlur.value=e}get textureMatrix(){return this._textureMatrix.value}set textureMatrix(e){this._textureMatrix.value=e}get hasBlur(){return this._hasBlur.value}set hasBlur(e){this._hasBlur.value=e}get mirror(){return this._mirror.value}set mirror(e){this._mirror.value=e}get mixBlur(){return this._mixBlur.value}set mixBlur(e){this._mixBlur.value=e}get mixStrength(){return this._blurStrength.value}set mixStrength(e){this._blurStrength.value=e}get minDepthThreshold(){return this._minDepthThreshold.value}set minDepthThreshold(e){this._minDepthThreshold.value=e}get maxDepthThreshold(){return this._maxDepthThreshold.value}set maxDepthThreshold(e){this._maxDepthThreshold.value=e}get depthScale(){return this._depthScale.value}set depthScale(e){this._depthScale.value=e}get depthToBlurRatioBias(){return this._depthToBlurRatioBias.value}set depthToBlurRatioBias(e){this._depthToBlurRatioBias.value=e}get distortion(){return this._distortion.value}set distortion(e){this._distortion.value=e}get mixContrast(){return this._mixContrast.value}set mixContrast(e){this._mixContrast.value=e}}exports.BlurPass=class{constructor({gl:t,resolution:n,width:r=500,height:i=500,minDepthThreshold:o=0,maxDepthThreshold:a=1,depthScale:s=0,depthToBlurRatioBias:u=.25}){this.renderToScreen=!1,this.renderTargetA=new e.WebGLRenderTarget(n,n,{minFilter:e.LinearFilter,magFilter:e.LinearFilter,stencilBuffer:!1,depthBuffer:!1,encoding:t.outputEncoding}),this.renderTargetB=this.renderTargetA.clone(),this.convolutionMaterial=new l,this.convolutionMaterial.setTexelSize(1/r,1/i),this.convolutionMaterial.setResolution(new e.Vector2(r,i)),this.scene=new e.Scene,this.camera=new e.Camera,this.convolutionMaterial.uniforms.minDepthThreshold.value=o,this.convolutionMaterial.uniforms.maxDepthThreshold.value=a,this.convolutionMaterial.uniforms.depthScale.value=s,this.convolutionMaterial.uniforms.depthToBlurRatioBias.value=u,this.convolutionMaterial.defines.USE_DEPTH=s>0;const h=new Float32Array([-1,-1,0,3,-1,0,-1,3,0]),c=new Float32Array([0,0,2,0,0,2]),m=new e.BufferGeometry;m.setAttribute("position",new e.BufferAttribute(h,3)),m.setAttribute("uv",new e.BufferAttribute(c,2)),this.screen=new 
e.Mesh(m,this.convolutionMaterial),this.screen.frustumCulled=!1,this.scene.add(this.screen)}render(e,t,n){const r=this.scene,i=this.camera,o=this.renderTargetA,a=this.renderTargetB,s=this.convolutionMaterial,l=s.uniforms;l.depthBuffer.value=t.depthTexture;const u=s.kernel;let h,c,m,f=t;for(c=0,m=u.length-1;c<m;++c)h=0==(1&c)?o:a,l.kernel.value=u[c],l.inputBuffer.value=f.texture,e.setRenderTarget(h),e.render(r,i),f=h;l.kernel.value=u[c],l.inputBuffer.value=f.texture,e.setRenderTarget(this.renderToScreen?null:n),e.render(r,i)}},exports.ConvolutionMaterial=l,exports.MeshDiscardMaterial=i,exports.MeshReflectorMaterial=u,exports.MeshTransmissionMaterial=a,exports.ProgressiveLightMap=class{constructor(e,t,r=1024){this.renderer=e,this.res=r,this.scene=t,this.buffer1Active=!1,this.lights=[],this.meshes=[],this.object=null,this.clearColor=new n.Color,this.clearAlpha=0;const o=/(Android|iPad|iPhone|iPod)/g.test(navigator.userAgent)?n.HalfFloatType:n.FloatType;this.progressiveLightMap1=new n.WebGLRenderTarget(this.res,this.res,{type:o}),this.progressiveLightMap2=new n.WebGLRenderTarget(this.res,this.res,{type:o}),this.discardMat=new i,this.targetMat=new n.MeshLambertMaterial({fog:!1}),this.previousShadowMap={value:this.progressiveLightMap1.texture},this.averagingWindow={value:100},this.targetMat.onBeforeCompile=e=>{e.vertexShader="varying vec2 vUv;\n"+e.vertexShader.slice(0,-1)+"vUv = uv; gl_Position = vec4((uv - 0.5) * 2.0, 1.0, 1.0); }";const t=e.fragmentShader.indexOf("void main() {");e.fragmentShader="varying vec2 vUv;\n"+e.fragmentShader.slice(0,t)+"uniform sampler2D previousShadowMap;\n\tuniform float averagingWindow;\n"+e.fragmentShader.slice(t-1,-1)+"\nvec3 texelOld = texture2D(previousShadowMap, vUv).rgb;\n gl_FragColor.rgb = mix(texelOld, gl_FragColor.rgb, 1.0/ averagingWindow);\n }",e.uniforms.previousShadowMap=this.previousShadowMap,e.uniforms.averagingWindow=this.averagingWindow}}clear(){this.renderer.getClearColor(this.clearColor),this.clearAlpha=this.renderer.getClearAlpha(),this.renderer.setClearColor("black",1),this.renderer.setRenderTarget(this.progressiveLightMap1),this.renderer.clear(),this.renderer.setRenderTarget(this.progressiveLightMap2),this.renderer.clear(),this.renderer.setRenderTarget(null),this.renderer.setClearColor(this.clearColor,this.clearAlpha),this.lights=[],this.meshes=[],this.scene.traverse((e=>{!function(e){return!!e.geometry}(e)?function(e){return e.isLight}(e)&&this.lights.push({object:e,intensity:e.intensity}):this.meshes.push({object:e,material:e.material})}))}prepare(){this.lights.forEach((e=>e.object.intensity=0)),this.meshes.forEach((e=>e.object.material=this.discardMat))}finish(){this.lights.forEach((e=>e.object.intensity=e.intensity)),this.meshes.forEach((e=>e.object.material=e.material))}configure(e){this.object=e}update(e,t=100){if(!this.object)return;this.averagingWindow.value=t,this.object.material=this.targetMat;const n=this.buffer1Active?this.progressiveLightMap1:this.progressiveLightMap2,r=this.buffer1Active?this.progressiveLightMap2:this.progressiveLightMap1,i=this.scene.background;this.scene.background=null,this.renderer.setRenderTarget(n),this.previousShadowMap.value=r.texture,this.buffer1Active=!this.buffer1Active,this.renderer.render(this.scene,e),this.renderer.setRenderTarget(null),this.scene.background=i}},exports.SoftShadowMaterial=o,exports.SpotLightMaterial=s,exports.pcss=({focus:e=0,size:t=25,samples:r=10}={})=>{const i=n.ShaderChunk.shadowmap_pars_fragment;return 
n.ShaderChunk.shadowmap_pars_fragment=n.ShaderChunk.shadowmap_pars_fragment.replace("#ifdef USE_SHADOWMAP",`#ifdef USE_SHADOWMAP\n\n #define PENUMBRA_FILTER_SIZE float(${t})\n #define RGB_NOISE_FUNCTION(uv) (randRGB(uv))\n vec3 randRGB(vec2 uv) {\n return vec3(\n fract(sin(dot(uv, vec2(12.75613, 38.12123))) * 13234.76575),\n fract(sin(dot(uv, vec2(19.45531, 58.46547))) * 43678.23431),\n fract(sin(dot(uv, vec2(23.67817, 78.23121))) * 93567.23423)\n );\n }\n \n vec3 lowPassRandRGB(vec2 uv) {\n // 3x3 convolution (average)\n // can be implemented as separable with an extra buffer for a total of 6 samples instead of 9\n vec3 result = vec3(0);\n result += RGB_NOISE_FUNCTION(uv + vec2(-1.0, -1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(-1.0, 0.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(-1.0, +1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2( 0.0, -1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2( 0.0, 0.0));\n result += RGB_NOISE_FUNCTION(uv + vec2( 0.0, +1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(+1.0, -1.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(+1.0, 0.0));\n result += RGB_NOISE_FUNCTION(uv + vec2(+1.0, +1.0));\n result *= 0.111111111; // 1.0 / 9.0\n return result;\n }\n vec3 highPassRandRGB(vec2 uv) {\n // by subtracting the low-pass signal from the original signal, we're being left with the high-pass signal\n // hp(x) = x - lp(x)\n return RGB_NOISE_FUNCTION(uv) - lowPassRandRGB(uv) + 0.5;\n }\n \n \n vec2 vogelDiskSample(int sampleIndex, int sampleCount, float angle) {\n const float goldenAngle = 2.399963f; // radians\n float r = sqrt(float(sampleIndex) + 0.5f) / sqrt(float(sampleCount));\n float theta = float(sampleIndex) * goldenAngle + angle;\n float sine = sin(theta);\n float cosine = cos(theta);\n return vec2(cosine, sine) * r;\n }\n float penumbraSize( const in float zReceiver, const in float zBlocker ) { // Parallel plane estimation\n return (zReceiver - zBlocker) / zBlocker;\n }\n float findBlocker(sampler2D shadowMap, vec2 uv, float compare, float angle) {\n float texelSize = 1.0 / float(textureSize(shadowMap, 0).x);\n float blockerDepthSum = float(${e});\n float blockers = 0.0;\n \n int j = 0;\n vec2 offset = vec2(0.);\n float depth = 0.;\n \n #pragma unroll_loop_start\n for(int i = 0; i < ${r}; i ++) {\n offset = (vogelDiskSample(j, ${r}, angle) * texelSize) * 2.0 * PENUMBRA_FILTER_SIZE;\n depth = unpackRGBAToDepth( texture2D( shadowMap, uv + offset));\n if (depth < compare) {\n blockerDepthSum += depth;\n blockers++;\n }\n j++;\n }\n #pragma unroll_loop_end\n \n if (blockers > 0.0) {\n return blockerDepthSum / blockers;\n }\n return -1.0;\n }\n \n float vogelFilter(sampler2D shadowMap, vec2 uv, float zReceiver, float filterRadius, float angle) {\n float texelSize = 1.0 / float(textureSize(shadowMap, 0).x);\n float shadow = 0.0f;\n int j = 0;\n vec2 vogelSample = vec2(0.0);\n vec2 offset = vec2(0.0);\n #pragma unroll_loop_start\n for (int i = 0; i < ${r}; i++) {\n vogelSample = vogelDiskSample(j, ${r}, angle) * texelSize;\n offset = vogelSample * (1.0 + filterRadius * float(${t}));\n shadow += step( zReceiver, unpackRGBAToDepth( texture2D( shadowMap, uv + offset ) ) );\n j++;\n }\n #pragma unroll_loop_end\n return shadow * 1.0 / ${r}.0;\n }\n \n float PCSS (sampler2D shadowMap, vec4 coords) {\n vec2 uv = coords.xy;\n float zReceiver = coords.z; // Assumed to be eye-space z in this code\n float angle = highPassRandRGB(gl_FragCoord.xy).r * PI2;\n float avgBlockerDepth = findBlocker(shadowMap, uv, zReceiver, angle);\n if (avgBlockerDepth == -1.0) {\n return 1.0;\n }\n 
float penumbraRatio = penumbraSize(zReceiver, avgBlockerDepth);\n return vogelFilter(shadowMap, uv, zReceiver, 1.25 * penumbraRatio, angle);\n }`).replace("#if defined( SHADOWMAP_TYPE_PCF )","\nreturn PCSS(shadowMap, shadowCoord);\n#if defined( SHADOWMAP_TYPE_PCF )"),(e,t,r)=>{n.ShaderChunk.shadowmap_pars_fragment=i,function(e,t,n){t.traverse((t=>{t.material&&(e.properties.remove(t.material),t.material.dispose())})),e.info.programs.length=0,e.compile(t,n)}(e,t,r)}},exports.shaderMaterial=r; |
export { pcss } from './core/pcss.js';
export { shaderMaterial } from './core/shaderMaterial.js';
export { ProgressiveLightMap, SoftShadowMaterial } from './core/AccumulativeShadows.js';
export { MeshDiscardMaterial } from './materials/MeshDiscardMaterial.js';
@@ -4,0 +5,0 @@ export { MeshTransmissionMaterial } from './materials/MeshTransmissionMaterial.js';
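The 1.6.0 entry point now re-exports the new AccumulativeShadows primitives next to the existing helpers, as the export diff above shows. A minimal sketch of the resulting import surface (assuming a bundler that resolves the package's module build):

```js
// Sketch: the names added in 1.6.0 sit alongside the pre-existing helpers.
import { pcss, shaderMaterial, ProgressiveLightMap, SoftShadowMaterial } from '@pmndrs/vanilla'
```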
{
"name": "@pmndrs/vanilla",
"version": "1.5.2",
"version": "1.6.0",
"private": false,
@@ -5,0 +5,0 @@ "publishConfig": {
@@ -10,2 +10,6 @@ ![logo](logo.jpg)
Storybook demos [![storybook](https://img.shields.io/badge/-storybook-%23ff69b4)](https://pmndrs.github.io/drei-vanilla/)
Storybook code available under [.storybook/stories](https://github.com/pmndrs/drei-vanilla/tree/main/.storybook/stories)
```bash
@@ -45,2 +49,6 @@ npm install @pmndrs/vanilla
</ul>
<li><a href="#staging">Staging</a></li>
<ul>
<li><a href="#accumulativeshadows">AccumulativeShadows</a></li>
</ul>
</ul>
@@ -219,1 +227,13 @@ </td>
Easily add reflections and/or blur to any mesh. It takes surface roughness into account for a more realistic effect. This material extends from [THREE.MeshStandardMaterial](https://threejs.org/docs/#api/en/materials/MeshStandardMaterial) and accepts all its props.
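A minimal sketch of putting the material on a ground plane, assuming a `scene` already exists. The material itself ships with this package, but the reflection render target and texture matrix (the `reflectionTarget` and `textureMatrix` names below are placeholders) have to be produced by your own mirror pass, as the storybook story demonstrates:

```js
import * as THREE from 'three'
import { MeshReflectorMaterial } from '@pmndrs/vanilla'

// Regular MeshStandardMaterial props still apply (color, roughness, metalness, ...)
const floor = new THREE.Mesh(
  new THREE.PlaneGeometry(10, 10),
  new MeshReflectorMaterial({ color: '#151515', roughness: 1 })
)
floor.rotation.x = -Math.PI / 2
scene.add(floor)

// Each frame, after rendering the mirrored view into a WebGLRenderTarget:
// floor.material.tDiffuse = reflectionTarget.texture // reflection color buffer (placeholder name)
// floor.material.textureMatrix = textureMatrix       // projective UV matrix for the reflector (placeholder name)
// floor.material.mirror = 1                          // how mirror-like the surface is (0 to 1)
// floor.material.mixStrength = 2                     // reflection intensity
```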
### AccumulativeShadows
[![](https://img.shields.io/badge/-storybook-%23ff69b4)](https://pmndrs.github.io/drei-vanilla/?path=/story/shaders-accumulativeshadows--acc-shadow-story)
<p>
<a href="https://codesandbox.io/s/hxcc1x"><img width="20%" src="https://codesandbox.io/api/v1/sandboxes/hxcc1x/screenshot.png" alt="Demo"/></a>
</p>
A planar, Y-up oriented shadow-catcher that can accumulate into soft shadows and has zero performance impact once all frames have accumulated. It can be temporal (accumulating over time) or instantaneous (which might be expensive depending on how many frames you render).
Refer to the storybook code for how to use it and what each variable does.
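A condensed sketch of how the two new exports fit together, inferred from this release's code and the storybook story. The accumulative light rig, its per-frame jittering, and the temporal blending are omitted (the storybook shows how the jittered lights stay active while `prepare()` mutes the rest); the frame count of 40 and the plane size are arbitrary placeholders:

```js
import * as THREE from 'three'
import { ProgressiveLightMap, SoftShadowMaterial } from '@pmndrs/vanilla'

// renderer, scene, camera and shadow-casting lights are assumed to exist already
const plm = new ProgressiveLightMap(renderer, scene, 1024)

// The shadow catcher: a Y-up plane that displays the accumulated light map
const plane = new THREE.Mesh(
  new THREE.PlaneGeometry(10, 10),
  new SoftShadowMaterial({
    transparent: true,
    depthWrite: false,
    opacity: 1,
    map: plm.progressiveLightMap2.texture, // accumulation buffer
  })
)
plane.rotation.x = -Math.PI / 2
scene.add(plane)
plm.configure(plane)

// Accumulate a batch of frames, then stop paying for it
plm.clear()               // reset buffers, collect the scene's lights and meshes
plm.prepare()             // mute collected lights, swap collected meshes to a discard material
for (let i = 0; i < 40; i++) {
  // jitter/reposition the accumulative lights here for soft penumbras
  plm.update(camera, 40)  // render one accumulation step into the light map
}
plm.finish()              // restore the original lights and materials
```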
Minified code
Quality: This package contains minified code. This may be harmless in some cases where minified code is included in packaged libraries; however, packages on npm should not minify code.
Found 2 instances in 1 package