main repo

This commit is contained in:
Basilosaurusrex
2025-11-24 18:09:40 +01:00
parent b636ee5e70
commit f027651f9b
34146 changed files with 4436636 additions and 0 deletions

View File

@@ -0,0 +1,242 @@
import { RenderTarget, Vector2, QuadMesh, NodeMaterial, RendererUtils, TempNode, NodeUpdateType } from 'three/webgpu';
import { nodeObject, Fn, float, uv, texture, passTexture, uniform, sign, max, convertToTexture } from 'three/tsl';
const _size = /*@__PURE__*/ new Vector2();
const _quadMeshComp = /*@__PURE__*/ new QuadMesh();
let _rendererState;
/**
* Post processing node for creating an after image effect.
*
* @augments TempNode
* @three_import import { afterImage } from 'three/addons/tsl/display/AfterImageNode.js';
*/
class AfterImageNode extends TempNode {
static get type() {
return 'AfterImageNode';
}
/**
* Constructs a new after image node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
* @param {number} [damp=0.96] - The damping intensity. A higher value means a stronger after image effect.
*/
constructor( textureNode, damp = 0.96 ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The texture node that represents the previous frame.
*
* @type {TextureNode}
*/
this.textureNodeOld = texture( null );
/**
* How quickly the after-image fades. A higher value means the after-image
* persists longer, while a lower value means it fades faster. Should be in
* the range `[0, 1]`.
*
* @type {UniformNode<float>}
*/
this.damp = uniform( damp );
/**
* The render target used for compositing the effect.
*
* @private
* @type {RenderTarget}
*/
this._compRT = new RenderTarget( 1, 1, { depthBuffer: false } );
this._compRT.texture.name = 'AfterImageNode.comp';
/**
* The render target that represents the previous frame.
*
* @private
* @type {RenderTarget}
*/
this._oldRT = new RenderTarget( 1, 1, { depthBuffer: false } );
this._oldRT.texture.name = 'AfterImageNode.old';
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._compRT.texture );
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
this._compRT.setSize( width, height );
this._oldRT.setSize( width, height );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
const textureNode = this.textureNode;
const map = textureNode.value;
const textureType = map.type;
this._compRT.texture.type = textureType;
this._oldRT.texture.type = textureType;
renderer.getDrawingBufferSize( _size );
this.setSize( _size.x, _size.y );
const currentTexture = textureNode.value;
this.textureNodeOld.value = this._oldRT.texture;
// comp
_quadMeshComp.material = this._materialComposed;
renderer.setRenderTarget( this._compRT );
_quadMeshComp.render( renderer );
// Swap the textures
const temp = this._oldRT;
this._oldRT = this._compRT;
this._compRT = temp;
//
textureNode.value = currentTexture;
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const textureNode = this.textureNode;
const textureNodeOld = this.textureNodeOld;
//
textureNodeOld.uvNode = textureNode.uvNode || uv();
const afterImg = Fn( () => {
const texelOld = textureNodeOld.sample().toVar();
const texelNew = textureNode.sample().toVar();
const threshold = float( 0.1 ).toConst();
// m acts as a mask. It's 1 if the previous pixel was "bright enough" (above the threshold) and 0 if it wasn't.
const m = max( sign( texelOld.sub( threshold ) ), 0.0 );
// This is where the after-image fades:
//
// - If m is 0, texelOld is multiplied by 0, effectively clearing the after-image for that pixel.
// - If m is 1, texelOld is multiplied by "damp". Since "damp" is between 0 and 1, this reduces the color value of
// texelOld, making it darker and causing it to fade.
texelOld.mulAssign( this.damp.mul( m ) );
return max( texelNew, texelOld );
} );
//
const materialComposed = this._materialComposed || ( this._materialComposed = new NodeMaterial() );
materialComposed.name = 'AfterImage';
materialComposed.fragmentNode = afterImg();
//
const properties = builder.getNodeProperties( this );
properties.textureNode = textureNode;
//
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._compRT.dispose();
this._oldRT.dispose();
}
}
/**
* TSL function for creating an after image node for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {number} [damp=0.96] - The damping intensity. A higher value means a stronger after image effect.
* @returns {AfterImageNode}
*/
export const afterImage = ( node, damp ) => nodeObject( new AfterImageNode( convertToTexture( node ), damp ) );
export default AfterImageNode;

View File

@@ -0,0 +1,107 @@
import { Matrix3, NodeMaterial } from 'three/webgpu';
import { clamp, nodeObject, Fn, vec4, uv, uniform, max } from 'three/tsl';
import StereoCompositePassNode from './StereoCompositePassNode.js';
/**
* A render pass node that creates an anaglyph effect.
*
* @augments StereoCompositePassNode
* @three_import import { anaglyphPass } from 'three/addons/tsl/display/AnaglyphPassNode.js';
*/
class AnaglyphPassNode extends StereoCompositePassNode {
static get type() {
return 'AnaglyphPassNode';
}
/**
* Constructs a new anaglyph pass node.
*
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
*/
constructor( scene, camera ) {
super( scene, camera );
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isAnaglyphPassNode = true;
// Dubois matrices from https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.6968&rep=rep1&type=pdf#page=4
/**
* Color matrix node for the left eye.
*
* @type {UniformNode<mat3>}
*/
this._colorMatrixLeft = uniform( new Matrix3().fromArray( [
0.456100, - 0.0400822, - 0.0152161,
0.500484, - 0.0378246, - 0.0205971,
0.176381, - 0.0157589, - 0.00546856
] ) );
/**
* Color matrix node for the right eye.
*
* @type {UniformNode<mat3>}
*/
this._colorMatrixRight = uniform( new Matrix3().fromArray( [
- 0.0434706, 0.378476, - 0.0721527,
- 0.0879388, 0.73364, - 0.112961,
- 0.00155529, - 0.0184503, 1.2264
] ) );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const uvNode = uv();
const anaglyph = Fn( () => {
const colorL = this._mapLeft.sample( uvNode );
const colorR = this._mapRight.sample( uvNode );
const color = clamp( this._colorMatrixLeft.mul( colorL.rgb ).add( this._colorMatrixRight.mul( colorR.rgb ) ) );
return vec4( color.rgb, max( colorL.a, colorR.a ) );
} );
const material = this._material || ( this._material = new NodeMaterial() );
material.fragmentNode = anaglyph().context( builder.getSharedContext() );
material.name = 'Anaglyph';
material.needsUpdate = true;
return super.setup( builder );
}
}
export default AnaglyphPassNode;
/**
* TSL function for creating an anaglyph pass node.
*
* @tsl
* @function
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
* @returns {AnaglyphPassNode}
*/
export const anaglyphPass = ( scene, camera ) => nodeObject( new AnaglyphPassNode( scene, camera ) );
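A minimal usage sketch: because `AnaglyphPassNode` renders the scene itself (once per eye), it replaces the usual `pass()` call rather than post-processing its output (`renderer`, `scene`, and `camera` are presumed to exist):

```js
import * as THREE from 'three/webgpu';
import { anaglyphPass } from 'three/addons/tsl/display/AnaglyphPassNode.js';

const postProcessing = new THREE.PostProcessing( renderer );

// renders left/right eye views and composites them with the Dubois matrices
postProcessing.outputNode = anaglyphPass( scene, camera );
```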

View File

@@ -0,0 +1,281 @@
import { RenderTarget, Vector2, TempNode, QuadMesh, NodeMaterial, RendererUtils } from 'three/webgpu';
import { nodeObject, Fn, float, NodeUpdateType, uv, passTexture, uniform, convertToTexture, vec2, vec3, Loop, mix, luminance } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
let _rendererState;
/**
* Post processing node for adding an anamorphic flare effect.
*
* @augments TempNode
* @three_import import { anamorphic } from 'three/addons/tsl/display/AnamorphicNode.js';
*/
class AnamorphicNode extends TempNode {
static get type() {
return 'AnamorphicNode';
}
/**
* Constructs a new anamorphic node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
* @param {Node<float>} thresholdNode - The threshold is one option to control the intensity and size of the effect.
* @param {Node<float>} scaleNode - Defines the vertical scale of the flares.
* @param {number} samples - More samples result in larger flares and a more expensive runtime behavior.
*/
constructor( textureNode, thresholdNode, scaleNode, samples ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The threshold is one option to control the intensity and size of the effect.
*
* @type {Node<float>}
*/
this.thresholdNode = thresholdNode;
/**
* Defines the vertical scale of the flares.
*
* @type {Node<float>}
*/
this.scaleNode = scaleNode;
/**
* The color of the flares.
*
* @type {Node<vec3>}
*/
this.colorNode = vec3( 0.1, 0.0, 1.0 );
/**
* More samples result in larger flares and a more expensive runtime behavior.
*
* @type {number}
*/
this.samples = samples;
/**
* The resolution scale.
*
* @type {number}
*/
this.resolutionScale = 1;
/**
* The internal render target of the effect.
*
* @private
* @type {RenderTarget}
*/
this._renderTarget = new RenderTarget( 1, 1, { depthBuffer: false } );
this._renderTarget.texture.name = 'anamorphic';
/**
* A uniform node holding the inverse resolution value.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._renderTarget.texture );
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
this._invSize.value.set( 1 / width, 1 / height );
width = Math.max( Math.round( width * this.resolutionScale ), 1 );
height = Math.max( Math.round( height * this.resolutionScale ), 1 );
this._renderTarget.setSize( width, height );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
const textureNode = this.textureNode;
const map = textureNode.value;
this._renderTarget.texture.type = map.type;
const currentTexture = textureNode.value;
_quadMesh.material = this._material;
this.setSize( map.image.width, map.image.height );
// render
renderer.setRenderTarget( this._renderTarget );
_quadMesh.render( renderer );
// restore
textureNode.value = currentTexture;
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const textureNode = this.textureNode;
const uvNode = textureNode.uvNode || uv();
const sampleTexture = ( uv ) => textureNode.sample( uv );
const threshold = ( color, threshold ) => mix( vec3( 0.0 ), color, luminance( color ).sub( threshold ).max( 0 ) );
const anamorph = Fn( () => {
const samples = this.samples;
const halfSamples = Math.floor( samples / 2 );
const total = vec3( 0 ).toVar();
Loop( { start: - halfSamples, end: halfSamples }, ( { i } ) => {
const softness = float( i ).abs().div( halfSamples ).oneMinus();
const uv = vec2( uvNode.x.add( this._invSize.x.mul( i ).mul( this.scaleNode ) ), uvNode.y );
const color = sampleTexture( uv );
const pass = threshold( color, this.thresholdNode ).mul( softness );
total.addAssign( pass );
} );
return total.mul( this.colorNode );
} );
//
const material = this._material || ( this._material = new NodeMaterial() );
material.name = 'Anamorphic';
material.fragmentNode = anamorph();
//
const properties = builder.getNodeProperties( this );
properties.textureNode = textureNode;
//
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._renderTarget.dispose();
}
/**
* The resolution scale.
*
* @deprecated
* @type {Vector2}
* @default {(1,1)}
*/
get resolution() {
console.warn( 'THREE.AnamorphicNode: The "resolution" property has been renamed to "resolutionScale" and is now of type `number`.' ); // @deprecated r180
return new Vector2( this.resolutionScale, this.resolutionScale );
}
set resolution( value ) {
console.warn( 'THREE.AnamorphicNode: The "resolution" property has been renamed to "resolutionScale" and is now of type `number`.' ); // @deprecated r180
this.resolutionScale = value.x;
}
}
/**
* TSL function for creating an anamorphic flare effect.
*
* @tsl
* @function
* @param {TextureNode} node - The node that represents the input of the effect.
* @param {Node<float> | number} [threshold=0.9] - The threshold is one option to control the intensity and size of the effect.
* @param {Node<float> | number} [scale=3] - Defines the vertical scale of the flares.
* @param {number} [samples=32] - More samples result in larger flares and a more expensive runtime behavior.
* @returns {AnamorphicNode}
*/
export const anamorphic = ( node, threshold = .9, scale = 3, samples = 32 ) => nodeObject( new AnamorphicNode( convertToTexture( node ), nodeObject( threshold ), nodeObject( scale ), samples ) );
export default AnamorphicNode;
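A minimal usage sketch, assuming the flare is composited additively over the scene color, which is a common pattern for this effect (`renderer`, `scene`, and `camera` are presumed to exist):

```js
import * as THREE from 'three/webgpu';
import { pass, vec3 } from 'three/tsl';
import { anamorphic } from 'three/addons/tsl/display/AnamorphicNode.js';

const postProcessing = new THREE.PostProcessing( renderer );

const scenePass = pass( scene, camera );
const scenePassColor = scenePass.getTextureNode( 'output' );

// threshold 0.9, vertical scale 3, 32 samples (the defaults)
const flares = anamorphic( scenePassColor, 0.9, 3, 32 );

postProcessing.outputNode = scenePassColor.add( flares );
```

The flare tint can be changed via the node's `colorNode` property, e.g. `flares.colorNode = vec3( 1.0, 0.2, 0.1 )`.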

View File

@@ -0,0 +1,33 @@
import { float, Fn, vec3, vec4, min, max, mix, luminance } from 'three/tsl';
/**
* Applies a bleach bypass effect to the given color node.
*
* @tsl
* @function
* @param {Node<vec4>} color - The color node to apply the bleach bypass to.
* @param {Node<float>} [opacity=1] - Influences how strong the effect is blended with the original color.
* @return {Node<vec4>} The updated color node.
*/
export const bleach = /*@__PURE__*/ Fn( ( [ color, opacity = 1 ] ) => {
const base = color;
const lum = luminance( base.rgb );
const blend = vec3( lum );
const L = min( 1.0, max( 0.0, float( 10.0 ).mul( lum.sub( 0.45 ) ) ) );
const result1 = blend.mul( base.rgb ).mul( 2.0 );
const result2 = float( 2.0 ).mul( blend.oneMinus() ).mul( base.rgb.oneMinus() ).oneMinus();
const newColor = mix( result1, result2, L );
const A2 = base.a.mul( opacity );
const mixRGB = A2.mul( newColor.rgb ).toVar(); // toVar() so the addAssign() below has an assignable target
mixRGB.addAssign( base.rgb.mul( A2.oneMinus() ) );
return vec4( mixRGB, base.a );
} );
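A minimal usage sketch: `bleach` is a pure color transform, so it wraps the scene color directly. The import path below is an assumption, since this file's name is not shown in the diff (`renderer`, `scene`, and `camera` are presumed to exist):

```js
import * as THREE from 'three/webgpu';
import { pass } from 'three/tsl';
import { bleach } from 'three/addons/tsl/display/BleachBypass.js';

const postProcessing = new THREE.PostProcessing( renderer );

const scenePass = pass( scene, camera );
const scenePassColor = scenePass.getTextureNode( 'output' );

// opacity 0.75 blends the effect with 25% of the original color
postProcessing.outputNode = bleach( scenePassColor, 0.75 );
```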

View File

@@ -0,0 +1,520 @@
import { HalfFloatType, RenderTarget, Vector2, Vector3, TempNode, QuadMesh, NodeMaterial, RendererUtils, NodeUpdateType } from 'three/webgpu';
import { nodeObject, Fn, float, uv, passTexture, uniform, Loop, texture, luminance, smoothstep, mix, vec4, uniformArray, add, int } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
const _size = /*@__PURE__*/ new Vector2();
const _BlurDirectionX = /*@__PURE__*/ new Vector2( 1.0, 0.0 );
const _BlurDirectionY = /*@__PURE__*/ new Vector2( 0.0, 1.0 );
let _rendererState;
/**
* Post processing node for creating a bloom effect.
* ```js
* const postProcessing = new THREE.PostProcessing( renderer );
*
* const scenePass = pass( scene, camera );
* const scenePassColor = scenePass.getTextureNode( 'output' );
*
* const bloomPass = bloom( scenePassColor );
*
* postProcessing.outputNode = scenePassColor.add( bloomPass );
* ```
* By default, the node affects the entire image. For a selective bloom,
* use the `emissive` material property to control which objects should
* contribute to bloom or not. This can be achieved via MRT.
* ```js
* const postProcessing = new THREE.PostProcessing( renderer );
*
* const scenePass = pass( scene, camera );
* scenePass.setMRT( mrt( {
* output,
* emissive
* } ) );
*
* const scenePassColor = scenePass.getTextureNode( 'output' );
* const emissivePass = scenePass.getTextureNode( 'emissive' );
*
* const bloomPass = bloom( emissivePass );
* postProcessing.outputNode = scenePassColor.add( bloomPass );
* ```
* @augments TempNode
* @three_import import { bloom } from 'three/addons/tsl/display/BloomNode.js';
*/
class BloomNode extends TempNode {
static get type() {
return 'BloomNode';
}
/**
* Constructs a new bloom node.
*
* @param {Node<vec4>} inputNode - The node that represents the input of the effect.
* @param {number} [strength=1] - The strength of the bloom.
* @param {number} [radius=0] - The radius of the bloom.
* @param {number} [threshold=0] - The luminance threshold limits which bright areas contribute to the bloom effect.
*/
constructor( inputNode, strength = 1, radius = 0, threshold = 0 ) {
super( 'vec4' );
/**
* The node that represents the input of the effect.
*
* @type {Node<vec4>}
*/
this.inputNode = inputNode;
/**
* The strength of the bloom.
*
* @type {UniformNode<float>}
*/
this.strength = uniform( strength );
/**
* The radius of the bloom.
*
* @type {UniformNode<float>}
*/
this.radius = uniform( radius );
/**
* The luminance threshold limits which bright areas contribute to the bloom effect.
*
* @type {UniformNode<float>}
*/
this.threshold = uniform( threshold );
/**
* Can be used to tweak the extracted luminance from the scene.
*
* @type {UniformNode<float>}
*/
this.smoothWidth = uniform( 0.01 );
/**
* An array that holds the render targets for the horizontal blur passes.
*
* @private
* @type {Array<RenderTarget>}
*/
this._renderTargetsHorizontal = [];
/**
* An array that holds the render targets for the vertical blur passes.
*
* @private
* @type {Array<RenderTarget>}
*/
this._renderTargetsVertical = [];
/**
* The number of blur mips.
*
* @private
* @type {number}
*/
this._nMips = 5;
/**
* The render target for the luminance pass.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetBright = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._renderTargetBright.texture.name = 'UnrealBloomPass.bright';
this._renderTargetBright.texture.generateMipmaps = false;
//
for ( let i = 0; i < this._nMips; i ++ ) {
const renderTargetHorizontal = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
renderTargetHorizontal.texture.name = 'UnrealBloomPass.h' + i;
renderTargetHorizontal.texture.generateMipmaps = false;
this._renderTargetsHorizontal.push( renderTargetHorizontal );
const renderTargetVertical = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
renderTargetVertical.texture.name = 'UnrealBloomPass.v' + i;
renderTargetVertical.texture.generateMipmaps = false;
this._renderTargetsVertical.push( renderTargetVertical );
}
/**
* The material for the composite pass.
*
* @private
* @type {?NodeMaterial}
*/
this._compositeMaterial = null;
/**
* The material for the luminance pass.
*
* @private
* @type {?NodeMaterial}
*/
this._highPassFilterMaterial = null;
/**
* The materials for the blur pass.
*
* @private
* @type {Array<NodeMaterial>}
*/
this._separableBlurMaterials = [];
/**
* The result of the luminance pass as a texture node for further processing.
*
* @private
* @type {TextureNode}
*/
this._textureNodeBright = texture( this._renderTargetBright.texture );
/**
* The result of the first blur pass as a texture node for further processing.
*
* @private
* @type {TextureNode}
*/
this._textureNodeBlur0 = texture( this._renderTargetsVertical[ 0 ].texture );
/**
* The result of the second blur pass as a texture node for further processing.
*
* @private
* @type {TextureNode}
*/
this._textureNodeBlur1 = texture( this._renderTargetsVertical[ 1 ].texture );
/**
* The result of the third blur pass as a texture node for further processing.
*
* @private
* @type {TextureNode}
*/
this._textureNodeBlur2 = texture( this._renderTargetsVertical[ 2 ].texture );
/**
* The result of the fourth blur pass as a texture node for further processing.
*
* @private
* @type {TextureNode}
*/
this._textureNodeBlur3 = texture( this._renderTargetsVertical[ 3 ].texture );
/**
* The result of the fifth blur pass as a texture node for further processing.
*
* @private
* @type {TextureNode}
*/
this._textureNodeBlur4 = texture( this._renderTargetsVertical[ 4 ].texture );
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureOutput = passTexture( this, this._renderTargetsHorizontal[ 0 ].texture );
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureOutput;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
let resx = Math.round( width / 2 );
let resy = Math.round( height / 2 );
this._renderTargetBright.setSize( resx, resy );
for ( let i = 0; i < this._nMips; i ++ ) {
this._renderTargetsHorizontal[ i ].setSize( resx, resy );
this._renderTargetsVertical[ i ].setSize( resx, resy );
this._separableBlurMaterials[ i ].invSize.value.set( 1 / resx, 1 / resy );
resx = Math.round( resx / 2 );
resy = Math.round( resy / 2 );
}
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
const size = renderer.getDrawingBufferSize( _size );
this.setSize( size.width, size.height );
// 1. Extract bright areas
renderer.setRenderTarget( this._renderTargetBright );
_quadMesh.material = this._highPassFilterMaterial;
_quadMesh.render( renderer );
// 2. Blur all the mips progressively
let inputRenderTarget = this._renderTargetBright;
for ( let i = 0; i < this._nMips; i ++ ) {
_quadMesh.material = this._separableBlurMaterials[ i ];
this._separableBlurMaterials[ i ].colorTexture.value = inputRenderTarget.texture;
this._separableBlurMaterials[ i ].direction.value = _BlurDirectionX;
renderer.setRenderTarget( this._renderTargetsHorizontal[ i ] );
_quadMesh.render( renderer );
this._separableBlurMaterials[ i ].colorTexture.value = this._renderTargetsHorizontal[ i ].texture;
this._separableBlurMaterials[ i ].direction.value = _BlurDirectionY;
renderer.setRenderTarget( this._renderTargetsVertical[ i ] );
_quadMesh.render( renderer );
inputRenderTarget = this._renderTargetsVertical[ i ];
}
// 3. Composite all the mips
renderer.setRenderTarget( this._renderTargetsHorizontal[ 0 ] );
_quadMesh.material = this._compositeMaterial;
_quadMesh.render( renderer );
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
// luminosity high pass material
const luminosityHighPass = Fn( () => {
const texel = this.inputNode;
const v = luminance( texel.rgb );
const alpha = smoothstep( this.threshold, this.threshold.add( this.smoothWidth ), v );
return mix( vec4( 0 ), texel, alpha );
} );
this._highPassFilterMaterial = this._highPassFilterMaterial || new NodeMaterial();
this._highPassFilterMaterial.fragmentNode = luminosityHighPass().context( builder.getSharedContext() );
this._highPassFilterMaterial.name = 'Bloom_highPass';
this._highPassFilterMaterial.needsUpdate = true;
// gaussian blur materials
// These sizes have been changed to account for the altered coefficients-calculation to avoid blockiness,
// while retaining the same blur-strength. For details see https://github.com/mrdoob/three.js/pull/31528
const kernelSizeArray = [ 6, 10, 14, 18, 22 ];
for ( let i = 0; i < this._nMips; i ++ ) {
this._separableBlurMaterials.push( this._getSeparableBlurMaterial( builder, kernelSizeArray[ i ] ) );
}
// composite material
const bloomFactors = uniformArray( [ 1.0, 0.8, 0.6, 0.4, 0.2 ] );
const bloomTintColors = uniformArray( [ new Vector3( 1, 1, 1 ), new Vector3( 1, 1, 1 ), new Vector3( 1, 1, 1 ), new Vector3( 1, 1, 1 ), new Vector3( 1, 1, 1 ) ] );
const lerpBloomFactor = Fn( ( [ factor, radius ] ) => {
const mirrorFactor = float( 1.2 ).sub( factor );
return mix( factor, mirrorFactor, radius );
} ).setLayout( {
name: 'lerpBloomFactor',
type: 'float',
inputs: [
{ name: 'factor', type: 'float' },
{ name: 'radius', type: 'float' },
]
} );
const compositePass = Fn( () => {
const color0 = lerpBloomFactor( bloomFactors.element( 0 ), this.radius ).mul( vec4( bloomTintColors.element( 0 ), 1.0 ) ).mul( this._textureNodeBlur0 );
const color1 = lerpBloomFactor( bloomFactors.element( 1 ), this.radius ).mul( vec4( bloomTintColors.element( 1 ), 1.0 ) ).mul( this._textureNodeBlur1 );
const color2 = lerpBloomFactor( bloomFactors.element( 2 ), this.radius ).mul( vec4( bloomTintColors.element( 2 ), 1.0 ) ).mul( this._textureNodeBlur2 );
const color3 = lerpBloomFactor( bloomFactors.element( 3 ), this.radius ).mul( vec4( bloomTintColors.element( 3 ), 1.0 ) ).mul( this._textureNodeBlur3 );
const color4 = lerpBloomFactor( bloomFactors.element( 4 ), this.radius ).mul( vec4( bloomTintColors.element( 4 ), 1.0 ) ).mul( this._textureNodeBlur4 );
const sum = color0.add( color1 ).add( color2 ).add( color3 ).add( color4 );
return sum.mul( this.strength );
} );
this._compositeMaterial = this._compositeMaterial || new NodeMaterial();
this._compositeMaterial.fragmentNode = compositePass().context( builder.getSharedContext() );
this._compositeMaterial.name = 'Bloom_comp';
this._compositeMaterial.needsUpdate = true;
//
return this._textureOutput;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
for ( let i = 0; i < this._renderTargetsHorizontal.length; i ++ ) {
this._renderTargetsHorizontal[ i ].dispose();
}
for ( let i = 0; i < this._renderTargetsVertical.length; i ++ ) {
this._renderTargetsVertical[ i ].dispose();
}
this._renderTargetBright.dispose();
}
/**
* Create a separable blur material for the given kernel radius.
*
* @param {NodeBuilder} builder - The current node builder.
* @param {number} kernelRadius - The kernel radius.
* @return {NodeMaterial}
*/
_getSeparableBlurMaterial( builder, kernelRadius ) {
const coefficients = [];
const sigma = kernelRadius / 3;
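// Gaussian weights: exp( - x^2 / ( 2 * sigma^2 ) ) / ( sigma * sqrt( 2 * PI ) ), with 0.39894 ~= 1 / sqrt( 2 * PI )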
for ( let i = 0; i < kernelRadius; i ++ ) {
coefficients.push( 0.39894 * Math.exp( - 0.5 * i * i / ( sigma * sigma ) ) / sigma );
}
//
const colorTexture = texture( null );
const gaussianCoefficients = uniformArray( coefficients );
const invSize = uniform( new Vector2() );
const direction = uniform( new Vector2( 0.5, 0.5 ) );
const uvNode = uv();
const sampleTexel = ( uv ) => colorTexture.sample( uv );
const separableBlurPass = Fn( () => {
const diffuseSum = sampleTexel( uvNode ).rgb.mul( gaussianCoefficients.element( 0 ) ).toVar();
Loop( { start: int( 1 ), end: int( kernelRadius ), type: 'int', condition: '<' }, ( { i } ) => {
const x = float( i );
const w = gaussianCoefficients.element( i );
const uvOffset = direction.mul( invSize ).mul( x );
const sample1 = sampleTexel( uvNode.add( uvOffset ) ).rgb;
const sample2 = sampleTexel( uvNode.sub( uvOffset ) ).rgb;
diffuseSum.addAssign( add( sample1, sample2 ).mul( w ) );
} );
return vec4( diffuseSum, 1.0 );
} );
const separableBlurMaterial = new NodeMaterial();
separableBlurMaterial.fragmentNode = separableBlurPass().context( builder.getSharedContext() );
separableBlurMaterial.name = 'Bloom_separable';
separableBlurMaterial.needsUpdate = true;
// uniforms
separableBlurMaterial.colorTexture = colorTexture;
separableBlurMaterial.direction = direction;
separableBlurMaterial.invSize = invSize;
return separableBlurMaterial;
}
}
/**
* TSL function for creating a bloom effect.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {number} [strength=1] - The strength of the bloom.
* @param {number} [radius=0] - The radius of the bloom.
* @param {number} [threshold=0] - The luminance threshold limits which bright areas contribute to the bloom effect.
* @returns {BloomNode}
*/
export const bloom = ( node, strength, radius, threshold ) => nodeObject( new BloomNode( nodeObject( node ), strength, radius, threshold ) );
export default BloomNode;
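The constructor wraps `strength`, `radius`, and `threshold` in uniforms, so an existing bloom pass can be tuned at runtime (e.g. from a GUI) without rebuilding the node graph. A minimal sketch, assuming `bloomPass` was created as in the class documentation above:

```js
// bloomPass = bloom( emissivePass ) as in the example above
bloomPass.strength.value = 1.5; // overall intensity of the composite
bloomPass.radius.value = 0.4; // shifts weight toward the larger blur mips
bloomPass.threshold.value = 0.2; // raises the luminance cutoff of the high-pass
```

No shader recompilation is needed since only uniform values change.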

View File

@@ -0,0 +1,207 @@
import { Vector2, TempNode } from 'three/webgpu';
import {
nodeObject,
Fn,
uniform,
convertToTexture,
float,
vec4,
uv,
NodeUpdateType,
} from 'three/tsl';
/**
* Post processing node for applying chromatic aberration effect.
* This effect simulates the color fringing that occurs in real camera lenses
* by separating and offsetting the red, green, and blue channels.
*
* @augments TempNode
* @three_import import { chromaticAberration } from 'three/addons/tsl/display/ChromaticAberrationNode.js';
*/
class ChromaticAberrationNode extends TempNode {
static get type() {
return 'ChromaticAberrationNode';
}
/**
* Constructs a new chromatic aberration node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
* @param {Node} strengthNode - The strength of the chromatic aberration effect as a node.
* @param {Node} centerNode - The center point of the effect as a node.
* @param {Node} scaleNode - The scale factor for stepped scaling from center as a node.
*/
constructor( textureNode, strengthNode, centerNode, scaleNode ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node updates
* its internal uniforms once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* A node holding the strength of the effect.
*
* @type {Node}
*/
this.strengthNode = strengthNode;
/**
* A node holding the center point of the effect.
*
* @type {Node}
*/
this.centerNode = centerNode;
/**
* A node holding the scale factor for stepped scaling.
*
* @type {Node}
*/
this.scaleNode = scaleNode;
/**
* A uniform node holding the inverse resolution value.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
}
/**
* This method is used to update the effect's uniforms once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( /* frame */ ) {
const map = this.textureNode.value;
this._invSize.value.set( 1 / map.image.width, 1 / map.image.height );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( /* builder */ ) {
const textureNode = this.textureNode;
const uvNode = textureNode.uvNode || uv();
const ApplyChromaticAberration = Fn( ( [ uv, strength, center, scale ] ) => {
// Calculate distance from center
const offset = uv.sub( center );
const distance = offset.length();
// Create stepped scaling zones based on distance
// Each channel gets different scaling steps
const redScale = float( 1.0 ).add( scale.mul( 0.02 ).mul( strength ) ); // Red channel scaled outward
const greenScale = float( 1.0 ); // Green stays at original scale
const blueScale = float( 1.0 ).sub( scale.mul( 0.02 ).mul( strength ) ); // Blue channel scaled inward
// Create radial distortion based on distance from center
const aberrationStrength = strength.mul( distance );
// Calculate scaled UV coordinates for each channel
const redUV = center.add( offset.mul( redScale ) );
const greenUV = center.add( offset.mul( greenScale ) );
const blueUV = center.add( offset.mul( blueScale ) );
// Apply additional chromatic offset based on aberration strength
const rOffset = offset.mul( aberrationStrength ).mul( float( 0.01 ) );
const gOffset = offset.mul( aberrationStrength ).mul( float( 0.0 ) );
const bOffset = offset.mul( aberrationStrength ).mul( float( - 0.01 ) );
// Final UV coordinates combining scale and chromatic aberration
const finalRedUV = redUV.add( rOffset );
const finalGreenUV = greenUV.add( gOffset );
const finalBlueUV = blueUV.add( bOffset );
// Sample texture for each channel
const r = textureNode.sample( finalRedUV ).r;
const g = textureNode.sample( finalGreenUV ).g;
const b = textureNode.sample( finalBlueUV ).b;
// Get original alpha
const a = textureNode.sample( uv ).a;
return vec4( r, g, b, a );
} ).setLayout( {
name: 'ChromaticAberrationShader',
type: 'vec4',
inputs: [
{ name: 'uv', type: 'vec2' },
{ name: 'strength', type: 'float' },
{ name: 'center', type: 'vec2' },
{ name: 'scale', type: 'float' },
{ name: 'invSize', type: 'vec2' }
]
} );
const chromaticAberrationFn = Fn( () => {
return ApplyChromaticAberration(
uvNode,
this.strengthNode,
this.centerNode,
this.scaleNode,
this._invSize
);
} );
const outputNode = chromaticAberrationFn();
return outputNode;
}
}
export default ChromaticAberrationNode;
/**
* TSL function for creating a chromatic aberration node for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {Node|number} [strength=1.0] - The strength of the chromatic aberration effect as a node or value.
* @param {?(Node|Vector2)} [center=null] - The center point of the effect as a node or value. If null, uses screen center (0.5, 0.5).
* @param {Node|number} [scale=1.1] - The scale factor for stepped scaling from center as a node or value.
* @returns {ChromaticAberrationNode}
*/
export const chromaticAberration = ( node, strength = 1.0, center = null, scale = 1.1 ) => {
return nodeObject(
new ChromaticAberrationNode(
convertToTexture( node ),
nodeObject( strength ),
nodeObject( center ),
nodeObject( scale )
)
);
};
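A minimal usage sketch; the center is passed explicitly as `vec2( 0.5, 0.5 )` (the screen center) rather than relying on the `null` default (`renderer`, `scene`, and `camera` are presumed to exist):

```js
import * as THREE from 'three/webgpu';
import { pass, vec2 } from 'three/tsl';
import { chromaticAberration } from 'three/addons/tsl/display/ChromaticAberrationNode.js';

const postProcessing = new THREE.PostProcessing( renderer );

const scenePass = pass( scene, camera );
const scenePassColor = scenePass.getTextureNode( 'output' );

// strength 1.0, fringing radiating from the screen center, scale 1.1
postProcessing.outputNode = chromaticAberration( scenePassColor, 1.0, vec2( 0.5, 0.5 ), 1.1 );
```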

View File

@@ -0,0 +1,332 @@
import { DataTexture, RepeatWrapping, Vector2, Vector3, TempNode } from 'three/webgpu';
import { texture, getNormalFromDepth, getViewPosition, convertToTexture, nodeObject, Fn, float, NodeUpdateType, uv, uniform, Loop, luminance, vec2, vec3, vec4, uniformArray, int, dot, max, pow, abs, If, textureSize, sin, cos, mat2, PI, property } from 'three/tsl';
import { SimplexNoise } from '../../math/SimplexNoise.js';
/**
* Post processing node for denoising data like raw screen-space ambient occlusion output.
* Denoise can noticeably improve the quality of ambient occlusion but also add quite some
* overhead to the post processing setup. It's best to make its usage optional (e.g. via
* graphic settings).
*
* Reference: {@link https://openaccess.thecvf.com/content/WACV2021/papers/Khademi_Self-Supervised_Poisson-Gaussian_Denoising_WACV_2021_paper.pdf}.
*
* @augments TempNode
* @three_import import { denoise } from 'three/addons/tsl/display/DenoiseNode.js';
*/
class DenoiseNode extends TempNode {
static get type() {
return 'DenoiseNode';
}
/**
* Constructs a new denoise node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect (e.g. AO).
* @param {Node<float>} depthNode - A node that represents the scene's depth.
* @param {?Node<vec3>} normalNode - A node that represents the scene's normals.
* @param {Camera} camera - The camera the scene is rendered with.
*/
constructor( textureNode, depthNode, normalNode, camera ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect (e.g. AO).
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* A node that represents the scene's depth.
*
* @type {Node<float>}
*/
this.depthNode = depthNode;
/**
* A node that represents the scene's normals. If no normals are passed to the
* constructor (because MRT is not available), normals can be automatically
* reconstructed from depth values in the shader.
*
* @type {?Node<vec3>}
*/
this.normalNode = normalNode;
/**
* The node represents the internal noise texture.
*
* @type {TextureNode}
*/
this.noiseNode = texture( generateDefaultNoise() );
/**
* The luma Phi value.
*
* @type {UniformNode<float>}
*/
this.lumaPhi = uniform( 5 );
/**
* The depth Phi value.
*
* @type {UniformNode<float>}
*/
this.depthPhi = uniform( 5 );
/**
* The normal Phi value.
*
* @type {UniformNode<float>}
*/
this.normalPhi = uniform( 5 );
/**
* The radius.
*
* @type {UniformNode<float>}
*/
this.radius = uniform( 5 );
/**
* The index.
*
* @type {UniformNode<float>}
*/
this.index = uniform( 0 );
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node updates
* its internal uniforms once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* The resolution of the effect.
*
* @private
* @type {UniformNode<vec2>}
*/
this._resolution = uniform( new Vector2() );
/**
* An array of sample vectors.
*
* @private
* @type {UniformArrayNode<vec3>}
*/
this._sampleVectors = uniformArray( generateDenoiseSamples( 16, 2, 1 ) );
/**
* Represents the inverse projection matrix of the scene's camera.
*
* @private
* @type {UniformNode<mat4>}
*/
this._cameraProjectionMatrixInverse = uniform( camera.projectionMatrixInverse );
}
/**
* This method is used to update internal uniforms once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore() {
const map = this.textureNode.value;
this._resolution.value.set( map.image.width, map.image.height );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( /* builder */ ) {
const uvNode = uv();
const sampleTexture = ( uv ) => this.textureNode.sample( uv );
const sampleDepth = ( uv ) => this.depthNode.sample( uv ).x;
const sampleNormal = ( uv ) => ( this.normalNode !== null ) ? this.normalNode.sample( uv ).rgb.normalize() : getNormalFromDepth( uv, this.depthNode.value, this._cameraProjectionMatrixInverse );
const sampleNoise = ( uv ) => this.noiseNode.sample( uv );
const denoiseSample = Fn( ( [ center, viewNormal, viewPosition, sampleUv ] ) => {
const texel = sampleTexture( sampleUv ).toVar();
const depth = sampleDepth( sampleUv ).toVar();
const normal = sampleNormal( sampleUv ).toVar();
const neighborColor = texel.rgb;
const viewPos = getViewPosition( sampleUv, depth, this._cameraProjectionMatrixInverse ).toVar();
const normalDiff = dot( viewNormal, normal ).toVar();
const normalSimilarity = pow( max( normalDiff, 0 ), this.normalPhi ).toVar();
const lumaDiff = abs( luminance( neighborColor ).sub( luminance( center ) ) ).toVar();
const lumaSimilarity = max( float( 1.0 ).sub( lumaDiff.div( this.lumaPhi ) ), 0 ).toVar();
const depthDiff = abs( dot( viewPosition.sub( viewPos ), viewNormal ) ).toVar();
const depthSimilarity = max( float( 1.0 ).sub( depthDiff.div( this.depthPhi ) ), 0 );
const w = lumaSimilarity.mul( depthSimilarity ).mul( normalSimilarity );
return vec4( neighborColor.mul( w ), w );
} );
const denoise = Fn( ( [ uvNode ] ) => {
const depth = sampleDepth( uvNode ).toVar();
const viewNormal = sampleNormal( uvNode ).toVar();
const texel = sampleTexture( uvNode ).toVar();
const result = property( 'vec4' );
If( depth.greaterThanEqual( 1.0 ).or( dot( viewNormal, viewNormal ).equal( 0.0 ) ), () => {
result.assign( texel );
} ).Else( () => {
const center = vec3( texel.rgb );
const viewPosition = getViewPosition( uvNode, depth, this._cameraProjectionMatrixInverse ).toConst();
const noiseResolution = textureSize( this.noiseNode, 0 );
let noiseUv = vec2( uvNode.x, uvNode.y.oneMinus() );
noiseUv = noiseUv.mul( this._resolution.div( noiseResolution ) );
const noiseTexel = sampleNoise( noiseUv ).toVar();
// pick a noise channel via the frame index, then map its [0, 1] value to an angle in [0, 2*PI]
const x = sin( noiseTexel.element( this.index.mod( 4 ) ).mul( 2 ).mul( PI ) );
const y = cos( noiseTexel.element( this.index.mod( 4 ) ).mul( 2 ).mul( PI ) );
const noiseVec = vec2( x, y );
const rotationMatrix = mat2( noiseVec.x, noiseVec.y.negate(), noiseVec.x, noiseVec.y );
const totalWeight = float( 1.0 ).toVar();
const denoised = vec3( texel.rgb ).toVar();
Loop( { start: int( 0 ), end: int( 16 ), type: 'int', condition: '<' }, ( { i } ) => {
const sampleDir = this._sampleVectors.element( i );
const offset = rotationMatrix.mul( sampleDir.xy.mul( float( 1.0 ).add( sampleDir.z.mul( this.radius.sub( 1 ) ) ) ) ).div( this._resolution );
const sampleUv = uvNode.add( offset );
const sampleResult = denoiseSample( center, viewNormal, viewPosition, sampleUv );
denoised.addAssign( sampleResult.xyz );
totalWeight.addAssign( sampleResult.w );
} );
If( totalWeight.greaterThan( float( 0 ) ), () => {
denoised.divAssign( totalWeight );
} );
result.assign( vec4( denoised, texel.a ) );
} );
return result;
}/*, { uv: 'vec2', return: 'vec4' }*/ );
const output = Fn( () => {
return denoise( uvNode );
} );
const outputNode = output();
return outputNode;
}
}
export default DenoiseNode;
/**
* Generates denoise samples based on the given parameters.
*
* @param {number} numSamples - The number of samples.
* @param {number} numRings - The number of rings.
* @param {number} radiusExponent - The radius exponent.
* @return {Array<Vector3>} The denoise samples.
*/
function generateDenoiseSamples( numSamples, numRings, radiusExponent ) {
const samples = [];
for ( let i = 0; i < numSamples; i ++ ) {
const angle = 2 * Math.PI * numRings * i / numSamples;
const radius = Math.pow( i / ( numSamples - 1 ), radiusExponent );
samples.push( new Vector3( Math.cos( angle ), Math.sin( angle ), radius ) );
}
return samples;
}
/**
* Generates a default noise texture for the given size.
*
* @param {number} [size=64] - The texture size.
* @return {DataTexture} The generated noise texture.
*/
function generateDefaultNoise( size = 64 ) {
const simplex = new SimplexNoise();
const arraySize = size * size * 4;
const data = new Uint8Array( arraySize );
for ( let i = 0; i < size; i ++ ) {
for ( let j = 0; j < size; j ++ ) {
const x = i;
const y = j;
data[ ( i * size + j ) * 4 ] = ( simplex.noise( x, y ) * 0.5 + 0.5 ) * 255;
data[ ( i * size + j ) * 4 + 1 ] = ( simplex.noise( x + size, y ) * 0.5 + 0.5 ) * 255;
data[ ( i * size + j ) * 4 + 2 ] = ( simplex.noise( x, y + size ) * 0.5 + 0.5 ) * 255;
data[ ( i * size + j ) * 4 + 3 ] = ( simplex.noise( x + size, y + size ) * 0.5 + 0.5 ) * 255;
}
}
const noiseTexture = new DataTexture( data, size, size );
noiseTexture.wrapS = RepeatWrapping;
noiseTexture.wrapT = RepeatWrapping;
noiseTexture.needsUpdate = true;
return noiseTexture;
}
/**
* TSL function for creating a denoise effect.
*
* @tsl
* @function
* @param {Node} node - The node that represents the input of the effect (e.g. AO).
* @param {Node<float>} depthNode - A node that represents the scene's depth.
* @param {?Node<vec3>} normalNode - A node that represents the scene's normals.
* @param {Camera} camera - The camera the scene is rendered with.
* @returns {DenoiseNode}
*/
export const denoise = ( node, depthNode, normalNode, camera ) => nodeObject( new DenoiseNode( convertToTexture( node ), nodeObject( depthNode ), nodeObject( normalNode ), camera ) );
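A minimal usage sketch, assuming the input is raw ambient occlusion from the `ao()` helper in `GTAONode.js`, and relying on the documented fallback that both nodes reconstruct normals from depth when the normal node is `null` (the MRT setup is abbreviated; `renderer`, `scene`, and `camera` are presumed to exist):

```js
import * as THREE from 'three/webgpu';
import { pass } from 'three/tsl';
import { ao } from 'three/addons/tsl/display/GTAONode.js';
import { denoise } from 'three/addons/tsl/display/DenoiseNode.js';

const postProcessing = new THREE.PostProcessing( renderer );

const scenePass = pass( scene, camera );
const scenePassColor = scenePass.getTextureNode( 'output' );
const scenePassDepth = scenePass.getTextureNode( 'depth' );

// no normal node given: normals are reconstructed from depth in the shader
const aoPass = ao( scenePassDepth, null, camera );
const denoisedAO = denoise( aoPass.getTextureNode(), scenePassDepth, null, camera );

postProcessing.outputNode = scenePassColor.mul( denoisedAO );
```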

View File

@@ -0,0 +1,547 @@
import { TempNode, NodeMaterial, NodeUpdateType, RenderTarget, Vector2, HalfFloatType, RedFormat, QuadMesh, RendererUtils } from 'three/webgpu';
import { convertToTexture, nodeObject, Fn, uniform, smoothstep, step, texture, max, uniformArray, outputStruct, property, vec4, vec3, uv, Loop, min, mix } from 'three/tsl';
import { gaussianBlur } from './GaussianBlurNode.js';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
let _rendererState;
/**
* Post processing node for creating depth of field (DOF) effect.
*
* References:
* - {@link https://pixelmischiefblog.wordpress.com/2016/11/25/bokeh-depth-of-field/}
* - {@link https://www.adriancourreges.com/blog/2016/09/09/doom-2016-graphics-study/}
*
* @augments TempNode
* @three_import import { dof } from 'three/addons/tsl/display/DepthOfFieldNode.js';
*/
class DepthOfFieldNode extends TempNode {
static get type() {
return 'DepthOfFieldNode';
}
/**
* Constructs a new DOF node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
* @param {Node<float>} viewZNode - Represents the viewZ depth values of the scene.
* @param {Node<float>} focusDistanceNode - Defines the effect's focus which is the distance along the camera's look direction in world units.
* @param {Node<float>} focalLengthNode - How far an object can be from the focal plane before it goes completely out-of-focus in world units.
* @param {Node<float>} bokehScaleNode - A unitless value for artistic purposes to adjust the size of the bokeh.
*/
constructor( textureNode, viewZNode, focusDistanceNode, focalLengthNode, bokehScaleNode ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* Represents the viewZ depth values of the scene.
*
* @type {Node<float>}
*/
this.viewZNode = viewZNode;
/**
* Defines the effect's focus which is the distance along the camera's look direction in world units.
*
* @type {Node<float>}
*/
this.focusDistanceNode = focusDistanceNode;
/**
* How far an object can be from the focal plane before it goes completely out-of-focus in world units.
*
* @type {Node<float>}
*/
this.focalLengthNode = focalLengthNode;
/**
* A unitless value for artistic purposes to adjust the size of the bokeh.
*
* @type {Node<float>}
*/
this.bokehScaleNode = bokehScaleNode;
/**
* The inverse size of the resolution.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
/**
* The render target used for the near and far field.
*
* @private
* @type {RenderTarget}
*/
this._CoCRT = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType, format: RedFormat, count: 2 } );
this._CoCRT.textures[ 0 ].name = 'DepthOfField.NearField';
this._CoCRT.textures[ 1 ].name = 'DepthOfField.FarField';
/**
* The render target used for blurring the near field.
*
* @private
* @type {RenderTarget}
*/
this._CoCBlurredRT = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType, format: RedFormat } );
this._CoCBlurredRT.texture.name = 'DepthOfField.NearFieldBlurred';
/**
* The render target used for the first blur pass.
*
* @private
* @type {RenderTarget}
*/
this._blur64RT = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._blur64RT.texture.name = 'DepthOfField.Blur64';
/**
* The render target used for the near field's second blur pass.
*
* @private
* @type {RenderTarget}
*/
this._blur16NearRT = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._blur16NearRT.texture.name = 'DepthOfField.Blur16Near';
/**
* The render target used for the far field's second blur pass.
*
* @private
* @type {RenderTarget}
*/
this._blur16FarRT = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._blur16FarRT.texture.name = 'DepthOfField.Blur16Far';
/**
* The render target used for the composite
*
* @private
* @type {RenderTarget}
*/
this._compositeRT = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._compositeRT.texture.name = 'DepthOfField.Composite';
/**
* The material used for the CoC/near and far fields.
*
* @private
* @type {NodeMaterial}
*/
this._CoCMaterial = new NodeMaterial();
/**
* The material used for blurring the near field.
*
* @private
* @type {NodeMaterial}
*/
this._CoCBlurredMaterial = new NodeMaterial();
/**
* The material used for the 64 tap blur.
*
* @private
* @type {NodeMaterial}
*/
this._blur64Material = new NodeMaterial();
/**
* The material used for the 16 tap blur.
*
* @private
* @type {NodeMaterial}
*/
this._blur16Material = new NodeMaterial();
/**
* The material used for the final composite.
*
* @private
* @type {NodeMaterial}
*/
this._compositeMaterial = new NodeMaterial();
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {TextureNode}
*/
this._textureNode = texture( this._compositeRT.texture );
/**
* The result of the CoC pass as a texture node.
*
* @private
* @type {TextureNode}
*/
this._CoCTextureNode = texture( this._CoCRT.texture );
/**
* The result of the blur64 pass as a texture node.
*
* @private
* @type {TextureNode}
*/
this._blur64TextureNode = texture( this._blur64RT.texture );
/**
* The result of the near field's blur16 pass as a texture node.
*
* @private
* @type {TextureNode}
*/
this._blur16NearTextureNode = texture( this._blur16NearRT.texture );
/**
* The result of the far field's blur16 pass as a texture node.
*
* @private
* @type {TextureNode}
*/
this._blur16FarTextureNode = texture( this._blur16FarRT.texture );
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node updates
* its internal uniforms once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
this._invSize.value.set( 1 / width, 1 / height );
this._CoCRT.setSize( width, height );
this._compositeRT.setSize( width, height );
// blur runs in half resolution
const halfResX = Math.round( width / 2 );
const halfResY = Math.round( height / 2 );
this._CoCBlurredRT.setSize( halfResX, halfResY );
this._blur64RT.setSize( halfResX, halfResY );
this._blur16NearRT.setSize( halfResX, halfResY );
this._blur16FarRT.setSize( halfResX, halfResY );
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* This method is used to update the effect's uniforms once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
// resize
const map = this.textureNode.value;
this.setSize( map.image.width, map.image.height );
// save state
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
renderer.setClearColor( 0x000000, 0 );
// coc
_quadMesh.material = this._CoCMaterial;
renderer.setRenderTarget( this._CoCRT );
_quadMesh.render( renderer );
// blur near field to avoid visible aliased edges when the near field
// is blended with the background
this._CoCTextureNode.value = this._CoCRT.textures[ 0 ];
_quadMesh.material = this._CoCBlurredMaterial;
renderer.setRenderTarget( this._CoCBlurredRT );
_quadMesh.render( renderer );
// blur64 near
this._CoCTextureNode.value = this._CoCBlurredRT.texture;
_quadMesh.material = this._blur64Material;
renderer.setRenderTarget( this._blur64RT );
_quadMesh.render( renderer );
// blur16 near
_quadMesh.material = this._blur16Material;
renderer.setRenderTarget( this._blur16NearRT );
_quadMesh.render( renderer );
// blur64 far
this._CoCTextureNode.value = this._CoCRT.textures[ 1 ];
_quadMesh.material = this._blur64Material;
renderer.setRenderTarget( this._blur64RT );
_quadMesh.render( renderer );
// blur16 far
_quadMesh.material = this._blur16Material;
renderer.setRenderTarget( this._blur16FarRT );
_quadMesh.render( renderer );
// composite
_quadMesh.material = this._compositeMaterial;
renderer.setRenderTarget( this._compositeRT );
_quadMesh.render( renderer );
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( builder ) {
const kernels = this._generateKernels();
// CoC, near and far fields
const nearField = property( 'float' );
const farField = property( 'float' );
const outputNode = outputStruct( nearField, farField );
const CoC = Fn( () => {
const signedDist = this.viewZNode.negate().sub( this.focusDistanceNode );
const CoC = smoothstep( 0, this.focalLengthNode, signedDist.abs() );
nearField.assign( step( signedDist, 0 ).mul( CoC ) );
farField.assign( step( 0, signedDist ).mul( CoC ) );
return vec4( 0 );
} );
this._CoCMaterial.colorNode = CoC().context( builder.getSharedContext() );
this._CoCMaterial.outputNode = outputNode;
this._CoCMaterial.needsUpdate = true;
// blurred CoC for near field
this._CoCBlurredMaterial.colorNode = gaussianBlur( this._CoCTextureNode, 1, 2 );
this._CoCBlurredMaterial.needsUpdate = true;
// bokeh 64 blur pass
const bokeh64 = uniformArray( kernels.points64 );
const blur64 = Fn( () => {
const acc = vec3().toVar(); // toVar() so the accumulation via addAssign() below is valid
const uvNode = uv();
const CoC = this._CoCTextureNode.sample( uvNode ).r;
const sampleStep = this._invSize.mul( this.bokehScaleNode ).mul( CoC );
Loop( 64, ( { i } ) => {
const sUV = uvNode.add( sampleStep.mul( bokeh64.element( i ) ) );
const tap = this.textureNode.sample( sUV );
acc.addAssign( tap.rgb );
} );
acc.divAssign( 64 );
return vec4( acc, CoC );
} );
this._blur64Material.fragmentNode = blur64().context( builder.getSharedContext() );
this._blur64Material.needsUpdate = true;
// bokeh 16 blur pass
const bokeh16 = uniformArray( kernels.points16 );
const blur16 = Fn( () => {
const uvNode = uv();
const col = this._blur64TextureNode.sample( uvNode ).toVar();
const maxVal = col.rgb;
const CoC = col.a;
const sampleStep = this._invSize.mul( this.bokehScaleNode ).mul( CoC );
Loop( 16, ( { i } ) => {
const sUV = uvNode.add( sampleStep.mul( bokeh16.element( i ) ) );
const tap = this._blur64TextureNode.sample( sUV );
maxVal.assign( max( tap.rgb, maxVal ) );
} );
return vec4( maxVal, CoC );
} );
this._blur16Material.fragmentNode = blur16().context( builder.getSharedContext() );
this._blur16Material.needsUpdate = true;
// composite
const composite = Fn( () => {
const uvNode = uv();
const near = this._blur16NearTextureNode.sample( uvNode );
const far = this._blur16FarTextureNode.sample( uvNode );
const beauty = this.textureNode.sample( uvNode );
// TODO: applying the bokeh scale to the near field CoC value introduces blending
// issues around edges of blurred foreground objects when they are rendered above
// the background. For now, don't apply the bokeh scale to the blend factors. That
// will cause less blur for objects which are partly out-of-focus (CoC between 0 and 1).
const blendNear = min( near.a, 0.5 ).mul( 2 );
const blendFar = min( far.a, 0.5 ).mul( 2 );
const result = vec4( 0, 0, 0, 1 ).toVar();
result.rgb = mix( beauty.rgb, far.rgb, blendFar );
result.rgb = mix( result.rgb, near.rgb, blendNear );
return result;
} );
this._compositeMaterial.fragmentNode = composite().context( builder.getSharedContext() );
this._compositeMaterial.needsUpdate = true;
return this._textureNode;
}
_generateKernels() {
// Vogel's method, see https://www.shadertoy.com/view/4fBXRG
// this approach allows to generate uniformly distributed sample
// points in a disc-shaped pattern. Blurring with these samples
// produces a typical optical lens blur
const GOLDEN_ANGLE = 2.39996323;
const SAMPLES = 80;
const points64 = [];
const points16 = [];
let idx64 = 0;
let idx16 = 0;
for ( let i = 0; i < SAMPLES; i ++ ) {
const theta = i * GOLDEN_ANGLE;
const r = Math.sqrt( i ) / Math.sqrt( SAMPLES );
const p = new Vector2( r * Math.cos( theta ), r * Math.sin( theta ) );
if ( i % 5 === 0 ) {
points16[ idx16 ] = p;
idx16 ++;
} else {
points64[ idx64 ] = p;
idx64 ++;
}
}
return { points16, points64 };
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._CoCRT.dispose();
this._CoCBlurredRT.dispose();
this._blur64RT.dispose();
this._blur16NearRT.dispose();
this._blur16FarRT.dispose();
this._compositeRT.dispose();
this._CoCMaterial.dispose();
this._CoCBlurredMaterial.dispose();
this._blur64Material.dispose();
this._blur16Material.dispose();
this._compositeMaterial.dispose();
}
}
export default DepthOfFieldNode;
/**
* TSL function for creating a depth-of-field effect (DOF) for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {Node<float>} viewZNode - Represents the viewZ depth values of the scene.
* @param {Node<float> | number} focusDistance - Defines the effect's focus which is the distance along the camera's look direction in world units.
* @param {Node<float> | number} focalLength - How far an object can be from the focal plane before it goes completely out-of-focus in world units.
* @param {Node<float> | number} bokehScale - A unitless value for artistic purposes to adjust the size of the bokeh.
* @returns {DepthOfFieldNode}
*/
export const dof = ( node, viewZNode, focusDistance = 1, focalLength = 1, bokehScale = 1 ) => nodeObject( new DepthOfFieldNode( convertToTexture( node ), nodeObject( viewZNode ), nodeObject( focusDistance ), nodeObject( focalLength ), nodeObject( bokehScale ) ) );
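
Usage sketch (not part of the original file): a minimal example of wiring dof() into a post-processing chain. The names `scene`, `camera` and `renderer` are assumed to exist in the host application, and `getViewZNode()` is assumed to behave as in the stock three.js pass node.

import { PostProcessing } from 'three/webgpu';
import { pass } from 'three/tsl';
import { dof } from 'three/addons/tsl/display/DepthOfFieldNode.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );
const scenePassColor = scenePass.getTextureNode();
const scenePassViewZ = scenePass.getViewZNode(); // viewZ values of the beauty pass

// focus 2 world units away, focal length of 1, slightly enlarged bokeh
postProcessing.outputNode = dof( scenePassColor, scenePassViewZ, 2, 1, 4 );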

node_modules/three/examples/jsm/tsl/display/DotScreenNode.js

@@ -0,0 +1,104 @@
import { TempNode } from 'three/webgpu';
import { nodeObject, Fn, uv, uniform, vec2, vec3, sin, cos, add, vec4, screenSize } from 'three/tsl';
/**
* Post processing node for creating dot-screen effect.
*
* @augments TempNode
* @three_import import { dotScreen } from 'three/addons/tsl/display/DotScreenNode.js';
*/
class DotScreenNode extends TempNode {
static get type() {
return 'DotScreenNode';
}
/**
* Constructs a new dot screen node.
*
* @param {Node} inputNode - The node that represents the input of the effect.
* @param {number} [angle=1.57] - The rotation of the effect in radians.
* @param {number} [scale=1] - The scale of the effect. A higher value means smaller dots.
*/
constructor( inputNode, angle = 1.57, scale = 1 ) {
super( 'vec4' );
/**
* The node that represents the input of the effect.
*
* @type {Node}
*/
this.inputNode = inputNode;
/**
* A uniform node that represents the rotation of the effect in radians.
*
* @type {UniformNode<float>}
*/
this.angle = uniform( angle );
/**
* A uniform node that represents the scale of the effect. A higher value means smaller dots.
*
* @type {UniformNode<float>}
*/
this.scale = uniform( scale );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup() {
const inputNode = this.inputNode;
const pattern = Fn( () => {
const s = sin( this.angle );
const c = cos( this.angle );
const tex = uv().mul( screenSize );
const point = vec2( c.mul( tex.x ).sub( s.mul( tex.y ) ), s.mul( tex.x ).add( c.mul( tex.y ) ) ).mul( this.scale );
return sin( point.x ).mul( sin( point.y ) ).mul( 4 );
} );
const dotScreen = Fn( () => {
const color = inputNode;
const average = add( color.r, color.g, color.b ).div( 3 );
return vec4( vec3( average.mul( 10 ).sub( 5 ).add( pattern() ) ), color.a );
} );
const outputNode = dotScreen();
return outputNode;
}
}
export default DotScreenNode;
/**
* TSL function for creating a dot-screen node for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {number} [angle=1.57] - The rotation of the effect in radians.
* @param {number} [scale=1] - The scale of the effect. A higher value means smaller dots.
* @returns {DotScreenNode}
*/
export const dotScreen = ( node, angle, scale ) => nodeObject( new DotScreenNode( nodeObject( node ), angle, scale ) );
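
Usage sketch (not part of the original file): applying dotScreen() to a scene pass. `scene`, `camera` and `renderer` are assumed to exist in the host application.

import { PostProcessing } from 'three/webgpu';
import { pass } from 'three/tsl';
import { dotScreen } from 'three/addons/tsl/display/DotScreenNode.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );

// rotate the dot pattern by 1.57 radians and use a scale of 0.8 (slightly larger dots)
postProcessing.outputNode = dotScreen( scenePass, 1.57, 0.8 );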

node_modules/three/examples/jsm/tsl/display/FXAANode.js

@@ -0,0 +1,365 @@
import { Vector2, TempNode } from 'three/webgpu';
import { nodeObject, Fn, uniformArray, select, float, NodeUpdateType, uv, dot, clamp, uniform, convertToTexture, smoothstep, bool, vec2, vec3, If, Loop, max, min, Break, abs } from 'three/tsl';
/**
* Post processing node for applying FXAA. This node requires sRGB input
* so tone mapping and color space conversion must happen before the anti-aliasing.
*
* @augments TempNode
* @three_import import { fxaa } from 'three/addons/tsl/display/FXAANode.js';
*/
class FXAANode extends TempNode {
static get type() {
return 'FXAANode';
}
/**
* Constructs a new FXAA node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
*/
constructor( textureNode ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node updates
* its internal uniforms once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* A uniform node holding the inverse resolution value.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
}
/**
* This method is used to update the effect's uniforms once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( /* frame */ ) {
const map = this.textureNode.value;
this._invSize.value.set( 1 / map.image.width, 1 / map.image.height );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( /* builder */ ) {
const textureNode = this.textureNode.bias( - 100 );
const uvNode = textureNode.uvNode || uv();
const EDGE_STEP_COUNT = float( 6 );
const EDGE_GUESS = float( 8.0 );
const EDGE_STEPS = uniformArray( [ 1.0, 1.5, 2.0, 2.0, 2.0, 4.0 ] );
const _ContrastThreshold = float( 0.0312 );
const _RelativeThreshold = float( 0.063 );
const _SubpixelBlending = float( 1.0 );
const Sample = Fn( ( [ uv ] ) => {
return textureNode.sample( uv );
} );
const SampleLuminance = Fn( ( [ uv ] ) => {
return dot( Sample( uv ).rgb, vec3( 0.3, 0.59, 0.11 ) );
} );
const SampleLuminanceOffset = Fn( ( [ texSize, uv, uOffset, vOffset ] ) => {
const shiftedUv = uv.add( texSize.mul( vec2( uOffset, vOffset ) ) );
return SampleLuminance( shiftedUv );
} );
const ShouldSkipPixel = ( l ) => {
const threshold = max( _ContrastThreshold, _RelativeThreshold.mul( l.highest ) );
return l.contrast.lessThan( threshold );
};
const SampleLuminanceNeighborhood = ( texSize, uv ) => {
const m = SampleLuminance( uv );
const n = SampleLuminanceOffset( texSize, uv, 0.0, - 1.0 );
const e = SampleLuminanceOffset( texSize, uv, 1.0, 0.0 );
const s = SampleLuminanceOffset( texSize, uv, 0.0, 1.0 );
const w = SampleLuminanceOffset( texSize, uv, - 1.0, 0.0 );
const ne = SampleLuminanceOffset( texSize, uv, 1.0, - 1.0 );
const nw = SampleLuminanceOffset( texSize, uv, - 1.0, - 1.0 );
const se = SampleLuminanceOffset( texSize, uv, 1.0, 1.0 );
const sw = SampleLuminanceOffset( texSize, uv, - 1.0, 1.0 );
const highest = max( s, e, n, w, m );
const lowest = min( s, e, n, w, m );
const contrast = highest.sub( lowest );
return { m, n, e, s, w, ne, nw, se, sw, highest, lowest, contrast };
};
const DeterminePixelBlendFactor = ( l ) => {
let f = float( 2.0 ).mul( l.s.add( l.e ).add( l.n ).add( l.w ) );
f = f.add( l.se.add( l.sw ).add( l.ne ).add( l.nw ) );
f = f.mul( 1.0 / 12.0 );
f = abs( f.sub( l.m ) );
f = clamp( f.div( max( l.contrast, 0 ) ), 0.0, 1.0 );
const blendFactor = smoothstep( 0.0, 1.0, f );
return blendFactor.mul( blendFactor ).mul( _SubpixelBlending );
};
const DetermineEdge = ( texSize, l ) => {
const horizontal =
abs( l.s.add( l.n ).sub( l.m.mul( 2.0 ) ) ).mul( 2.0 ).add(
abs( l.se.add( l.ne ).sub( l.e.mul( 2.0 ) ) ).add(
abs( l.sw.add( l.nw ).sub( l.w.mul( 2.0 ) ) )
)
);
const vertical =
abs( l.e.add( l.w ).sub( l.m.mul( 2.0 ) ) ).mul( 2.0 ).add(
abs( l.se.add( l.sw ).sub( l.s.mul( 2.0 ) ) ).add(
abs( l.ne.add( l.nw ).sub( l.n.mul( 2.0 ) ) )
)
);
const isHorizontal = horizontal.greaterThanEqual( vertical );
const pLuminance = select( isHorizontal, l.s, l.e );
const nLuminance = select( isHorizontal, l.n, l.w );
const pGradient = abs( pLuminance.sub( l.m ) );
const nGradient = abs( nLuminance.sub( l.m ) );
const pixelStep = select( isHorizontal, texSize.y, texSize.x ).toVar();
const oppositeLuminance = float().toVar();
const gradient = float().toVar();
If( pGradient.lessThan( nGradient ), () => {
pixelStep.assign( pixelStep.negate() );
oppositeLuminance.assign( nLuminance );
gradient.assign( nGradient );
} ).Else( () => {
oppositeLuminance.assign( pLuminance );
gradient.assign( pGradient );
} );
return { isHorizontal, pixelStep, oppositeLuminance, gradient };
};
const DetermineEdgeBlendFactor = ( texSize, l, e, uv ) => {
const uvEdge = uv.toVar();
const edgeStep = vec2().toVar();
If( e.isHorizontal, () => {
uvEdge.y.addAssign( e.pixelStep.mul( 0.5 ) );
edgeStep.assign( vec2( texSize.x, 0.0 ) );
} ).Else( () => {
uvEdge.x.addAssign( e.pixelStep.mul( 0.5 ) );
edgeStep.assign( vec2( 0.0, texSize.y ) );
} );
const edgeLuminance = l.m.add( e.oppositeLuminance ).mul( 0.5 );
const gradientThreshold = e.gradient.mul( 0.25 );
const puv = uvEdge.add( edgeStep.mul( EDGE_STEPS.element( 0 ) ) ).toVar();
const pLuminanceDelta = SampleLuminance( puv ).sub( edgeLuminance ).toVar();
const pAtEnd = abs( pLuminanceDelta ).greaterThanEqual( gradientThreshold ).toVar();
Loop( { start: 1, end: EDGE_STEP_COUNT }, ( { i } ) => {
If( pAtEnd, () => {
Break();
} );
puv.addAssign( edgeStep.mul( EDGE_STEPS.element( i ) ) );
pLuminanceDelta.assign( SampleLuminance( puv ).sub( edgeLuminance ) );
pAtEnd.assign( abs( pLuminanceDelta ).greaterThanEqual( gradientThreshold ) );
} );
If( pAtEnd.not(), () => {
puv.addAssign( edgeStep.mul( EDGE_GUESS ) );
} );
const nuv = uvEdge.sub( edgeStep.mul( EDGE_STEPS.element( 0 ) ) ).toVar();
const nLuminanceDelta = SampleLuminance( nuv ).sub( edgeLuminance ).toVar();
const nAtEnd = abs( nLuminanceDelta ).greaterThanEqual( gradientThreshold ).toVar();
Loop( { start: 1, end: EDGE_STEP_COUNT }, ( { i } ) => {
If( nAtEnd, () => {
Break();
} );
nuv.subAssign( edgeStep.mul( EDGE_STEPS.element( i ) ) );
nLuminanceDelta.assign( SampleLuminance( nuv ).sub( edgeLuminance ) );
nAtEnd.assign( abs( nLuminanceDelta ).greaterThanEqual( gradientThreshold ) );
} );
If( nAtEnd.not(), () => {
nuv.subAssign( edgeStep.mul( EDGE_GUESS ) );
} );
const pDistance = float().toVar();
const nDistance = float().toVar();
If( e.isHorizontal, () => {
pDistance.assign( puv.x.sub( uv.x ) );
nDistance.assign( uv.x.sub( nuv.x ) );
} ).Else( () => {
pDistance.assign( puv.y.sub( uv.y ) );
nDistance.assign( uv.y.sub( nuv.y ) );
} );
const shortestDistance = float().toVar();
const deltaSign = bool().toVar();
If( pDistance.lessThanEqual( nDistance ), () => {
shortestDistance.assign( pDistance );
deltaSign.assign( pLuminanceDelta.greaterThanEqual( 0.0 ) );
} ).Else( () => {
shortestDistance.assign( nDistance );
deltaSign.assign( nLuminanceDelta.greaterThanEqual( 0.0 ) );
} );
const blendFactor = float().toVar();
If( deltaSign.equal( l.m.sub( edgeLuminance ).greaterThanEqual( 0.0 ) ), () => {
blendFactor.assign( 0.0 );
} ).Else( () => {
blendFactor.assign( float( 0.5 ).sub( shortestDistance.div( pDistance.add( nDistance ) ) ) );
} );
return blendFactor;
};
const ApplyFXAA = Fn( ( [ uv, texSize ] ) => {
const luminance = SampleLuminanceNeighborhood( texSize, uv );
If( ShouldSkipPixel( luminance ), () => {
return Sample( uv );
} );
const pixelBlend = DeterminePixelBlendFactor( luminance );
const edge = DetermineEdge( texSize, luminance );
const edgeBlend = DetermineEdgeBlendFactor( texSize, luminance, edge, uv );
const finalBlend = max( pixelBlend, edgeBlend );
const finalUv = uv.toVar();
If( edge.isHorizontal, () => {
finalUv.y.addAssign( edge.pixelStep.mul( finalBlend ) );
} ).Else( () => {
finalUv.x.addAssign( edge.pixelStep.mul( finalBlend ) );
} );
return Sample( finalUv );
} ).setLayout( {
name: 'FxaaPixelShader',
type: 'vec4',
inputs: [
{ name: 'uv', type: 'vec2' },
{ name: 'texSize', type: 'vec2' },
]
} );
const fxaa = Fn( () => {
return ApplyFXAA( uvNode, this._invSize );
} );
const outputNode = fxaa();
return outputNode;
}
}
export default FXAANode;
/**
* TSL function for creating a FXAA node for anti-aliasing via post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @returns {FXAANode}
*/
export const fxaa = ( node ) => nodeObject( new FXAANode( convertToTexture( node ) ) );
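
Usage sketch (not part of the original file): since FXAA expects sRGB input, tone mapping and color space conversion are applied first via the pass's renderOutput(). `scene`, `camera` and `renderer` are assumed to exist; `renderOutput()` and `outputColorTransform` are assumed to behave as in the stock three.js post-processing setup.

import { PostProcessing } from 'three/webgpu';
import { pass } from 'three/tsl';
import { fxaa } from 'three/addons/tsl/display/FXAANode.js';

const postProcessing = new PostProcessing( renderer );
postProcessing.outputColorTransform = false; // output transform is handled manually below

const scenePass = pass( scene, camera );

// renderOutput() applies tone mapping and sRGB conversion, then FXAA runs on the result
postProcessing.outputNode = fxaa( scenePass.renderOutput() );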

node_modules/three/examples/jsm/tsl/display/FilmNode.js

@@ -0,0 +1,101 @@
import { TempNode } from 'three/webgpu';
import { rand, Fn, fract, time, uv, clamp, mix, vec4, nodeProxy } from 'three/tsl';
/**
* Post processing node for creating a film grain effect.
*
* @augments TempNode
* @three_import import { film } from 'three/addons/tsl/display/FilmNode.js';
*/
class FilmNode extends TempNode {
static get type() {
return 'FilmNode';
}
/**
* Constructs a new film node.
*
* @param {Node} inputNode - The node that represents the input of the effect.
* @param {?Node<float>} [intensityNode=null] - A node that represents the effect's intensity.
* @param {?Node<vec2>} [uvNode=null] - A node that allows passing custom (e.g. animated) uv data.
*/
constructor( inputNode, intensityNode = null, uvNode = null ) {
super( 'vec4' );
/**
* The node that represents the input of the effect.
*
* @type {Node}
*/
this.inputNode = inputNode;
/**
* A node that represents the effect's intensity.
*
* @type {?Node<float>}
* @default null
*/
this.intensityNode = intensityNode;
/**
* A node that allows passing custom (e.g. animated) uv data.
*
* @type {?Node<vec2>}
* @default null
*/
this.uvNode = uvNode;
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( /* builder */ ) {
const uvNode = this.uvNode || uv();
const film = Fn( () => {
const base = this.inputNode.rgb;
const noise = rand( fract( uvNode.add( time ) ) );
let color = base.add( base.mul( clamp( noise.add( 0.1 ), 0, 1 ) ) );
if ( this.intensityNode !== null ) {
color = mix( base, color, this.intensityNode );
}
return vec4( color, this.inputNode.a );
} );
const outputNode = film();
return outputNode;
}
}
export default FilmNode;
/**
* TSL function for creating a film node for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} inputNode - The node that represents the input of the effect.
* @param {?Node<float>} [intensityNode=null] - A node that represents the effect's intensity.
* @param {?Node<vec2>} [uvNode=null] - A node that allows passing custom (e.g. animated) uv data.
* @returns {FilmNode}
*/
export const film = /*@__PURE__*/ nodeProxy( FilmNode );
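
Usage sketch (not part of the original file): adding film grain on top of a scene pass. `scene`, `camera` and `renderer` are assumed to exist in the host application.

import { PostProcessing } from 'three/webgpu';
import { pass } from 'three/tsl';
import { film } from 'three/addons/tsl/display/FilmNode.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );

// grain at half strength; omitting the third argument falls back to the default uv()
postProcessing.outputNode = film( scenePass, 0.5 );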

node_modules/three/examples/jsm/tsl/display/GTAONode.js

@@ -0,0 +1,530 @@
import { DataTexture, RenderTarget, RepeatWrapping, Vector2, Vector3, TempNode, QuadMesh, NodeMaterial, RendererUtils } from 'three/webgpu';
import { reference, logarithmicDepthToViewZ, viewZToPerspectiveDepth, getNormalFromDepth, getScreenPosition, getViewPosition, nodeObject, Fn, float, NodeUpdateType, uv, uniform, Loop, vec2, vec3, vec4, int, dot, max, pow, abs, If, textureSize, sin, cos, PI, texture, passTexture, mat3, add, normalize, mul, cross, div, mix, sqrt, sub, acos, clamp } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
const _size = /*@__PURE__*/ new Vector2();
let _rendererState;
/**
* Post processing node for applying Ground Truth Ambient Occlusion (GTAO) to a scene.
* ```js
* const postProcessing = new THREE.PostProcessing( renderer );
*
* const scenePass = pass( scene, camera );
* scenePass.setMRT( mrt( {
* output: output,
* normal: normalView
* } ) );
*
* const scenePassColor = scenePass.getTextureNode( 'output' );
* const scenePassNormal = scenePass.getTextureNode( 'normal' );
* const scenePassDepth = scenePass.getTextureNode( 'depth' );
*
* const aoPass = ao( scenePassDepth, scenePassNormal, camera );
*
* postProcessing.outputNode = aoPass.getTextureNode().mul( scenePassColor );
* ```
*
* Reference: {@link https://www.activision.com/cdn/research/Practical_Real_Time_Strategies_for_Accurate_Indirect_Occlusion_NEW%20VERSION_COLOR.pdf}.
*
* @augments TempNode
* @three_import import { ao } from 'three/addons/tsl/display/GTAONode.js';
*/
class GTAONode extends TempNode {
static get type() {
return 'GTAONode';
}
/**
* Constructs a new GTAO node.
*
* @param {Node<float>} depthNode - A node that represents the scene's depth.
* @param {?Node<vec3>} normalNode - A node that represents the scene's normals.
* @param {Camera} camera - The camera the scene is rendered with.
*/
constructor( depthNode, normalNode, camera ) {
super( 'vec4' );
/**
* A node that represents the scene's depth.
*
* @type {Node<float>}
*/
this.depthNode = depthNode;
/**
* A node that represents the scene's normals. If no normals are passed to the
* constructor (because MRT is not available), normals can be automatically
* reconstructed from depth values in the shader.
*
* @type {?Node<vec3>}
*/
this.normalNode = normalNode;
/**
* The resolution scale. By default, the effect is rendered at full resolution
* for best quality, but a value of `0.5` should be sufficient for most scenes.
*
* @type {number}
* @default 1
*/
this.resolutionScale = 1;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* The render target the ambient occlusion is rendered into.
*
* @private
* @type {RenderTarget}
*/
this._aoRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false } );
this._aoRenderTarget.texture.name = 'GTAONode.AO';
// uniforms
/**
* The radius of the ambient occlusion.
*
* @type {UniformNode<float>}
*/
this.radius = uniform( 0.25 );
/**
* The resolution of the effect. Can be scaled via
* `resolutionScale`.
*
* @type {UniformNode<vec2>}
*/
this.resolution = uniform( new Vector2() );
/**
* The thickness of the ambient occlusion.
*
* @type {UniformNode<float>}
*/
this.thickness = uniform( 1 );
/**
* Another option to tweak the occlusion. The recommended range is
* `[1,2]` for attenuating the AO.
*
* @type {UniformNode<float>}
*/
this.distanceExponent = uniform( 1 );
/**
* The distance fall off value of the ambient occlusion.
* A lower value leads to a larger AO effect. The value
* should lie in the range `[0,1]`.
*
* @type {UniformNode<float>}
*/
this.distanceFallOff = uniform( 1 );
/**
* The scale of the ambient occlusion.
*
* @type {UniformNode<float>}
*/
this.scale = uniform( 1 );
/**
* How many samples are used to compute the AO.
* A higher value results in better quality but also
* in a more expensive runtime behavior.
*
* @type {UniformNode<float>}
*/
this.samples = uniform( 16 );
/**
* The node represents the internal noise texture used by the AO.
*
* @private
* @type {TextureNode}
*/
this._noiseNode = texture( generateMagicSquareNoise() );
/**
* Represents the projection matrix of the scene's camera.
*
* @private
* @type {UniformNode<mat4>}
*/
this._cameraProjectionMatrix = uniform( camera.projectionMatrix );
/**
* Represents the inverse projection matrix of the scene's camera.
*
* @private
* @type {UniformNode<mat4>}
*/
this._cameraProjectionMatrixInverse = uniform( camera.projectionMatrixInverse );
/**
* Represents the near value of the scene's camera.
*
* @private
* @type {ReferenceNode<float>}
*/
this._cameraNear = reference( 'near', 'float', camera );
/**
* Represents the far value of the scene's camera.
*
* @private
* @type {ReferenceNode<float>}
*/
this._cameraFar = reference( 'far', 'float', camera );
/**
* The material that is used to render the effect.
*
* @private
* @type {NodeMaterial}
*/
this._material = new NodeMaterial();
this._material.name = 'GTAO';
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._aoRenderTarget.texture );
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
width = Math.round( this.resolutionScale * width );
height = Math.round( this.resolutionScale * height );
this.resolution.value.set( width, height );
this._aoRenderTarget.setSize( width, height );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
const size = renderer.getDrawingBufferSize( _size );
this.setSize( size.width, size.height );
_quadMesh.material = this._material;
// clear
renderer.setClearColor( 0xffffff, 1 );
// ao
renderer.setRenderTarget( this._aoRenderTarget );
_quadMesh.render( renderer );
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const uvNode = uv();
const sampleDepth = ( uv ) => {
const depth = this.depthNode.sample( uv ).r;
if ( builder.renderer.logarithmicDepthBuffer === true ) {
const viewZ = logarithmicDepthToViewZ( depth, this._cameraNear, this._cameraFar );
return viewZToPerspectiveDepth( viewZ, this._cameraNear, this._cameraFar );
}
return depth;
};
const sampleNoise = ( uv ) => this._noiseNode.sample( uv );
const sampleNormal = ( uv ) => ( this.normalNode !== null ) ? this.normalNode.sample( uv ).rgb.normalize() : getNormalFromDepth( uv, this.depthNode.value, this._cameraProjectionMatrixInverse );
const ao = Fn( () => {
const depth = sampleDepth( uvNode ).toVar();
depth.greaterThanEqual( 1.0 ).discard();
const viewPosition = getViewPosition( uvNode, depth, this._cameraProjectionMatrixInverse ).toVar();
const viewNormal = sampleNormal( uvNode ).toVar();
const radiusToUse = this.radius;
const noiseResolution = textureSize( this._noiseNode, 0 );
let noiseUv = vec2( uvNode.x, uvNode.y.oneMinus() );
noiseUv = noiseUv.mul( this.resolution.div( noiseResolution ) );
const noiseTexel = sampleNoise( noiseUv );
const randomVec = noiseTexel.xyz.mul( 2.0 ).sub( 1.0 );
const tangent = vec3( randomVec.xy, 0.0 ).normalize();
const bitangent = vec3( tangent.y.mul( - 1.0 ), tangent.x, 0.0 );
const kernelMatrix = mat3( tangent, bitangent, vec3( 0.0, 0.0, 1.0 ) );
const DIRECTIONS = this.samples.lessThan( 30 ).select( 3, 5 ).toVar();
const STEPS = add( this.samples, DIRECTIONS.sub( 1 ) ).div( DIRECTIONS ).toVar();
const ao = float( 0 ).toVar();
// Each iteration analyzes one vertical "slice" of the 3D space around the fragment.
Loop( { start: int( 0 ), end: DIRECTIONS, type: 'int', condition: '<' }, ( { i } ) => {
const angle = float( i ).div( float( DIRECTIONS ) ).mul( PI ).toVar();
const sampleDir = vec4( cos( angle ), sin( angle ), 0., add( 0.5, mul( 0.5, noiseTexel.w ) ) );
sampleDir.xyz = normalize( kernelMatrix.mul( sampleDir.xyz ) );
const viewDir = normalize( viewPosition.xyz.negate() ).toVar();
const sliceBitangent = normalize( cross( sampleDir.xyz, viewDir ) ).toVar();
const sliceTangent = cross( sliceBitangent, viewDir );
const normalInSlice = normalize( viewNormal.sub( sliceBitangent.mul( dot( viewNormal, sliceBitangent ) ) ) );
const tangentToNormalInSlice = cross( normalInSlice, sliceBitangent ).toVar();
const cosHorizons = vec2( dot( viewDir, tangentToNormalInSlice ), dot( viewDir, tangentToNormalInSlice.negate() ) ).toVar();
// For each slice, the inner loop performs ray marching to find the horizons.
Loop( { end: STEPS, type: 'int', name: 'j', condition: '<' }, ( { j } ) => {
const sampleViewOffset = sampleDir.xyz.mul( radiusToUse ).mul( sampleDir.w ).mul( pow( div( float( j ).add( 1.0 ), float( STEPS ) ), this.distanceExponent ) );
// The loop marches in two opposite directions (x and y) along the slice's line to find the horizon on both sides.
// x
const sampleScreenPositionX = getScreenPosition( viewPosition.add( sampleViewOffset ), this._cameraProjectionMatrix ).toVar();
const sampleDepthX = sampleDepth( sampleScreenPositionX ).toVar();
const sampleSceneViewPositionX = getViewPosition( sampleScreenPositionX, sampleDepthX, this._cameraProjectionMatrixInverse ).toVar();
const viewDeltaX = sampleSceneViewPositionX.sub( viewPosition ).toVar();
If( abs( viewDeltaX.z ).lessThan( this.thickness ), () => {
const sampleCosHorizon = dot( viewDir, normalize( viewDeltaX ) );
cosHorizons.x.addAssign( max( 0, mul( sampleCosHorizon.sub( cosHorizons.x ), mix( 1.0, float( 2.0 ).div( float( j ).add( 2 ) ), this.distanceFallOff ) ) ) );
} );
// y
const sampleScreenPositionY = getScreenPosition( viewPosition.sub( sampleViewOffset ), this._cameraProjectionMatrix ).toVar();
const sampleDepthY = sampleDepth( sampleScreenPositionY ).toVar();
const sampleSceneViewPositionY = getViewPosition( sampleScreenPositionY, sampleDepthY, this._cameraProjectionMatrixInverse ).toVar();
const viewDeltaY = sampleSceneViewPositionY.sub( viewPosition ).toVar();
If( abs( viewDeltaY.z ).lessThan( this.thickness ), () => {
const sampleCosHorizon = dot( viewDir, normalize( viewDeltaY ) );
cosHorizons.y.addAssign( max( 0, mul( sampleCosHorizon.sub( cosHorizons.y ), mix( 1.0, float( 2.0 ).div( float( j ).add( 2 ) ), this.distanceFallOff ) ) ) );
} );
} );
// After the horizons are found for a given slice, their contribution to the total occlusion is calculated.
const sinHorizons = sqrt( sub( 1.0, cosHorizons.mul( cosHorizons ) ) ).toVar();
const nx = dot( normalInSlice, sliceTangent );
const ny = dot( normalInSlice, viewDir );
const nxb = mul( 0.5, acos( cosHorizons.y ).sub( acos( cosHorizons.x ) ).add( sinHorizons.x.mul( cosHorizons.x ).sub( sinHorizons.y.mul( cosHorizons.y ) ) ) );
const nyb = mul( 0.5, sub( 2.0, cosHorizons.x.mul( cosHorizons.x ) ).sub( cosHorizons.y.mul( cosHorizons.y ) ) );
const occlusion = nx.mul( nxb ).add( ny.mul( nyb ) );
ao.addAssign( occlusion );
} );
ao.assign( clamp( ao.div( DIRECTIONS ), 0, 1 ) );
ao.assign( pow( ao, this.scale ) );
return vec4( vec3( ao ), 1.0 );
} );
this._material.fragmentNode = ao().context( builder.getSharedContext() );
this._material.needsUpdate = true;
//
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._aoRenderTarget.dispose();
this._material.dispose();
}
}
export default GTAONode;
/**
* Generates the AO's noise texture for the given size.
*
* @param {number} [size=5] - The noise size.
* @return {DataTexture} The generated noise texture.
*/
function generateMagicSquareNoise( size = 5 ) {
const noiseSize = Math.floor( size ) % 2 === 0 ? Math.floor( size ) + 1 : Math.floor( size );
const magicSquare = generateMagicSquare( noiseSize );
const noiseSquareSize = magicSquare.length;
const data = new Uint8Array( noiseSquareSize * 4 );
for ( let inx = 0; inx < noiseSquareSize; ++ inx ) {
const iAng = magicSquare[ inx ];
const angle = ( 2 * Math.PI * iAng ) / noiseSquareSize;
const randomVec = new Vector3(
Math.cos( angle ),
Math.sin( angle ),
0
).normalize();
data[ inx * 4 ] = ( randomVec.x * 0.5 + 0.5 ) * 255;
data[ inx * 4 + 1 ] = ( randomVec.y * 0.5 + 0.5 ) * 255;
data[ inx * 4 + 2 ] = 127;
data[ inx * 4 + 3 ] = 255;
}
const noiseTexture = new DataTexture( data, noiseSize, noiseSize );
noiseTexture.wrapS = RepeatWrapping;
noiseTexture.wrapT = RepeatWrapping;
noiseTexture.needsUpdate = true;
return noiseTexture;
}
/**
* Computes an array of magic square values required to generate the noise texture.
*
* @param {number} size - The noise size.
* @return {Array<number>} The magic square values.
*/
function generateMagicSquare( size ) {
const noiseSize = Math.floor( size ) % 2 === 0 ? Math.floor( size ) + 1 : Math.floor( size );
const noiseSquareSize = noiseSize * noiseSize;
const magicSquare = Array( noiseSquareSize ).fill( 0 );
let i = Math.floor( noiseSize / 2 );
let j = noiseSize - 1;
for ( let num = 1; num <= noiseSquareSize; ) {
if ( i === - 1 && j === noiseSize ) {
j = noiseSize - 2;
i = 0;
} else {
if ( j === noiseSize ) {
j = 0;
}
if ( i < 0 ) {
i = noiseSize - 1;
}
}
if ( magicSquare[ i * noiseSize + j ] !== 0 ) {
j -= 2;
i ++;
continue;
} else {
magicSquare[ i * noiseSize + j ] = num ++;
}
j ++;
i --;
}
return magicSquare;
}
/**
* TSL function for creating a Ground Truth Ambient Occlusion (GTAO) effect.
*
* @tsl
* @function
* @param {Node<float>} depthNode - A node that represents the scene's depth.
* @param {?Node<vec3>} normalNode - A node that represents the scene's normals.
* @param {Camera} camera - The camera the scene is rendered with.
* @returns {GTAONode}
*/
export const ao = ( depthNode, normalNode, camera ) => nodeObject( new GTAONode( nodeObject( depthNode ), nodeObject( normalNode ), camera ) );

node_modules/three/examples/jsm/tsl/display/GaussianBlurNode.js

@@ -0,0 +1,378 @@
import { RenderTarget, Vector2, NodeMaterial, RendererUtils, QuadMesh, TempNode, NodeUpdateType } from 'three/webgpu';
import { nodeObject, Fn, float, uv, uniform, convertToTexture, vec2, vec4, passTexture, premultiplyAlpha, unpremultiplyAlpha } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
let _rendererState;
/**
* Post processing node for creating a gaussian blur effect.
*
* @augments TempNode
* @three_import import { gaussianBlur, premultipliedGaussianBlur } from 'three/addons/tsl/display/GaussianBlurNode.js';
*/
class GaussianBlurNode extends TempNode {
static get type() {
return 'GaussianBlurNode';
}
/**
* Constructs a new gaussian blur node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
* @param {Node<vec2|float>} directionNode - Defines the direction and radius of the blur.
* @param {number} sigma - Controls the kernel of the blur filter. Higher values mean a wider blur radius.
* @param {Object} [options={}] - Additional options for the gaussian blur effect.
* @param {boolean} [options.premultipliedAlpha=false] - Whether to use premultiplied alpha for the blur effect.
* @param {number} [options.resolutionScale=1] - The resolution scale of the effect. 0.5 means half the resolution of the texture node.
*/
constructor( textureNode, directionNode = null, sigma = 4, options = {} ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* Defines the direction and radius of the blur.
*
* @type {Node<vec2|float>}
*/
this.directionNode = directionNode;
/**
* Controls the kernel of the blur filter. Higher values mean a wider blur radius.
*
* @type {number}
*/
this.sigma = sigma;
/**
* A uniform node holding the inverse resolution value.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
/**
* Gaussian blur is applied in two passes (horizontal, vertical).
* This node controls the direction of each pass.
*
* @private
* @type {UniformNode<vec2>}
*/
this._passDirection = uniform( new Vector2() );
/**
* The render target used for the horizontal pass.
*
* @private
* @type {RenderTarget}
*/
this._horizontalRT = new RenderTarget( 1, 1, { depthBuffer: false } );
this._horizontalRT.texture.name = 'GaussianBlurNode.horizontal';
/**
* The render target used for the vertical pass.
*
* @private
* @type {RenderTarget}
*/
this._verticalRT = new RenderTarget( 1, 1, { depthBuffer: false } );
this._verticalRT.texture.name = 'GaussianBlurNode.vertical';
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._verticalRT.texture );
this._textureNode.uvNode = textureNode.uvNode;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* The resolution scale.
*
* @type {number}
* @default 1
*/
this.resolutionScale = options.resolutionScale || 1;
/**
* Whether the effect should use premultiplied alpha or not. Set this to `true`
* if you are going to blur texture input with transparency.
*
* @type {boolean}
* @default false
*/
this.premultipliedAlpha = options.premultipliedAlpha || false;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
width = Math.max( Math.round( width * this.resolutionScale ), 1 );
height = Math.max( Math.round( height * this.resolutionScale ), 1 );
this._invSize.value.set( 1 / width, 1 / height );
this._horizontalRT.setSize( width, height );
this._verticalRT.setSize( width, height );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
const textureNode = this.textureNode;
const map = textureNode.value;
const currentTexture = textureNode.value;
_quadMesh.material = this._material;
this.setSize( map.image.width, map.image.height );
const textureType = map.type;
this._horizontalRT.texture.type = textureType;
this._verticalRT.texture.type = textureType;
// horizontal
renderer.setRenderTarget( this._horizontalRT );
this._passDirection.value.set( 1, 0 );
_quadMesh.render( renderer );
// vertical
textureNode.value = this._horizontalRT.texture;
renderer.setRenderTarget( this._verticalRT );
this._passDirection.value.set( 0, 1 );
_quadMesh.render( renderer );
// restore
textureNode.value = currentTexture;
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const textureNode = this.textureNode;
//
const uvNode = uv();
const directionNode = vec2( this.directionNode || 1 );
let sampleTexture, output;
if ( this.premultipliedAlpha ) {
// https://lisyarus.github.io/blog/posts/blur-coefficients-generator.html
sampleTexture = ( uv ) => premultiplyAlpha( textureNode.sample( uv ) );
output = ( color ) => unpremultiplyAlpha( color );
} else {
sampleTexture = ( uv ) => textureNode.sample( uv );
output = ( color ) => color;
}
const blur = Fn( () => {
const kernelSize = 3 + ( 2 * this.sigma );
const gaussianCoefficients = this._getCoefficients( kernelSize );
const invSize = this._invSize;
const direction = directionNode.mul( this._passDirection );
const diffuseSum = vec4( sampleTexture( uvNode ).mul( gaussianCoefficients[ 0 ] ) ).toVar();
for ( let i = 1; i < kernelSize; i ++ ) {
const x = float( i );
const w = float( gaussianCoefficients[ i ] );
const uvOffset = vec2( direction.mul( invSize.mul( x ) ) ).toVar();
const sample1 = sampleTexture( uvNode.add( uvOffset ) );
const sample2 = sampleTexture( uvNode.sub( uvOffset ) );
diffuseSum.addAssign( sample1.add( sample2 ).mul( w ) );
}
return output( diffuseSum );
} );
//
const material = this._material || ( this._material = new NodeMaterial() );
material.fragmentNode = blur().context( builder.getSharedContext() );
material.name = 'Gaussian_blur';
material.needsUpdate = true;
//
const properties = builder.getNodeProperties( this );
properties.textureNode = textureNode;
//
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._horizontalRT.dispose();
this._verticalRT.dispose();
}
/**
* Computes gaussian coefficients depending on the given kernel radius.
*
* @private
* @param {number} kernelRadius - The kernel radius.
* @return {Array<number>}
*/
_getCoefficients( kernelRadius ) {
const coefficients = [];
const sigma = kernelRadius / 3;
for ( let i = 0; i < kernelRadius; i ++ ) {
coefficients.push( 0.39894 * Math.exp( - 0.5 * i * i / ( sigma * sigma ) ) / sigma );
}
return coefficients;
}
/**
* The resolution scale.
*
* @deprecated
* @type {Vector2}
* @default (1,1)
*/
get resolution() {
console.warn( 'THREE.GaussianBlurNode: The "resolution" property has been renamed to "resolutionScale" and is now of type `number`.' ); // @deprecated r180
return new Vector2( this.resolutionScale, this.resolutionScale );
}
set resolution( value ) {
console.warn( 'THREE.GaussianBlurNode: The "resolution" property has been renamed to "resolutionScale" and is now of type `number`.' ); // @deprecated r180
this.resolutionScale = value.x;
}
}
export default GaussianBlurNode;
/**
* TSL function for creating a gaussian blur node for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {Node<vec2|float>} directionNode - Defines the direction and radius of the blur.
* @param {number} sigma - Controls the kernel of the blur filter. Higher values mean a wider blur radius.
* @param {Object} [options={}] - Additional options for the gaussian blur effect.
* @param {boolean} [options.premultipliedAlpha=false] - Whether to use premultiplied alpha for the blur effect.
* @param {number} [options.resolutionScale=1] - The resolution scale of the effect. 0.5 means half the resolution of the texture node.
* @returns {GaussianBlurNode}
*/
export const gaussianBlur = ( node, directionNode, sigma, options = {} ) => nodeObject( new GaussianBlurNode( convertToTexture( node ), directionNode, sigma, options ) );
/**
* TSL function for creating a gaussian blur node for post processing with enabled premultiplied alpha.
*
* @tsl
* @function
* @deprecated since r180. Use `gaussianBlur()` with `premultipliedAlpha: true` option instead.
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {Node<vec2|float>} directionNode - Defines the direction and radius of the blur.
* @param {number} sigma - Controls the kernel of the blur filter. Higher values mean a wider blur radius.
* @returns {GaussianBlurNode}
*/
export function premultipliedGaussianBlur( node, directionNode, sigma ) {
console.warn( 'THREE.TSL: "premultipliedGaussianBlur()" is deprecated. Use "gaussianBlur()" with "premultipliedAlpha: true" option instead.' ); // deprecated, r180
return gaussianBlur( node, directionNode, sigma, { premultipliedAlpha: true } );
}
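
Usage sketch (not part of the original file): blurring a scene pass in both directions at half resolution. `scene`, `camera` and `renderer` are assumed to exist in the host application.

import { PostProcessing } from 'three/webgpu';
import { pass } from 'three/tsl';
import { gaussianBlur } from 'three/addons/tsl/display/GaussianBlurNode.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );

// direction 1 blurs horizontally and vertically, sigma 4 controls the kernel width
postProcessing.outputNode = gaussianBlur( scenePass, 1, 4, { resolutionScale: 0.5 } );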

node_modules/three/examples/jsm/tsl/display/LensflareNode.js

@@ -0,0 +1,279 @@
import { RenderTarget, Vector2, TempNode, NodeUpdateType, QuadMesh, RendererUtils, NodeMaterial } from 'three/webgpu';
import { convertToTexture, nodeObject, Fn, passTexture, uv, vec2, vec3, vec4, max, float, sub, int, Loop, fract, pow, distance } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
const _size = /*@__PURE__*/ new Vector2();
let _rendererState;
/**
* Post processing node for adding a bloom-based lens flare effect. This effect
* requires that you extract the bloom of the scene via a bloom pass first.
*
* References:
* - {@link https://john-chapman-graphics.blogspot.com/2013/02/pseudo-lens-flare.html}.
* - {@link https://john-chapman.github.io/2017/11/05/pseudo-lens-flare.html}.
*
* @augments TempNode
* @three_import import { lensflare } from 'three/addons/tsl/display/LensflareNode.js';
*/
class LensflareNode extends TempNode {
static get type() {
return 'LensflareNode';
}
/**
* Constructs a new lens flare node.
*
* @param {TextureNode} textureNode - The texture node that represents the scene's bloom.
* @param {Object} params - The parameter object for configuring the effect.
* @param {Node<vec3> | Color} [params.ghostTint=vec3(1, 1, 1)] - Defines the tint of the flare/ghosts.
* @param {Node<float> | number} [params.threshold=float(0.5)] - Controls the size and strength of the effect. A higher threshold results in smaller flares.
* @param {Node<float> | number} [params.ghostSamples=float(4)] - Represents the number of flares/ghosts per bright spot which pivot around the center.
* @param {Node<float> | number} [params.ghostSpacing=float(0.25)] - Defines the spacing of the flares/ghosts.
* @param {Node<float> | number} [params.ghostAttenuationFactor=float(25)] - Defines the attenuation factor of flares/ghosts.
* @param {number} [params.downSampleRatio=4] - Defines the downsample ratio, since the effect is usually not rendered at full resolution.
*/
constructor( textureNode, params = {} ) {
super( 'vec4' );
/**
* The texture node that represents the scene's bloom.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
const {
ghostTint = vec3( 1, 1, 1 ),
threshold = float( 0.5 ),
ghostSamples = float( 4 ),
ghostSpacing = float( 0.25 ),
ghostAttenuationFactor = float( 25 ),
downSampleRatio = 4
} = params;
/**
* Defines the tint of the flare/ghosts.
*
* @type {Node<vec3>}
*/
this.ghostTintNode = nodeObject( ghostTint );
/**
* Controls the size and strength of the effect. A higher threshold results in smaller flares.
*
* @type {Node<float>}
*/
this.thresholdNode = nodeObject( threshold );
/**
* Represents the number of flares/ghosts per bright spot which pivot around the center.
*
* @type {Node<float>}
*/
this.ghostSamplesNode = nodeObject( ghostSamples );
/**
* Defines the spacing of the flares/ghosts.
*
* @type {Node<float>}
*/
this.ghostSpacingNode = nodeObject( ghostSpacing );
/**
* Defines the attenuation factor of flares/ghosts.
*
* @type {Node<float>}
*/
this.ghostAttenuationFactorNode = nodeObject( ghostAttenuationFactor );
/**
* Defines the downsample ratio, since the effect is usually not rendered at full resolution.
*
* @type {number}
*/
this.downSampleRatio = downSampleRatio;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* The internal render target of the effect.
*
* @private
* @type {RenderTarget}
*/
this._renderTarget = new RenderTarget( 1, 1, { depthBuffer: false } );
this._renderTarget.texture.name = 'LensflareNode';
/**
* The node material that holds the effect's TSL code.
*
* @private
* @type {NodeMaterial}
*/
this._material = new NodeMaterial();
this._material.name = 'LensflareNode';
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._renderTarget.texture );
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
const resx = Math.round( width / this.downSampleRatio );
const resy = Math.round( height / this.downSampleRatio );
this._renderTarget.setSize( resx, resy );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
const size = renderer.getDrawingBufferSize( _size );
this.setSize( size.width, size.height );
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
_quadMesh.material = this._material;
// clear
renderer.setMRT( null );
// lensflare
renderer.setRenderTarget( this._renderTarget );
_quadMesh.render( renderer );
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const lensflare = Fn( () => {
// flip the uvs so the lens flare ghosts pivot around the image center
const texCoord = uv().oneMinus().toVar();
// ghosts are positioned along this vector
const ghostVec = sub( vec2( 0.5 ), texCoord ).mul( this.ghostSpacingNode ).toVar();
// sample ghosts
const result = vec4().toVar();
Loop( { start: int( 0 ), end: int( this.ghostSamplesNode ), type: 'int', condition: '<' }, ( { i } ) => {
// use fract() to ensure that the texture coordinates wrap around
const sampleUv = fract( texCoord.add( ghostVec.mul( float( i ) ) ) ).toVar();
// reduce contributions from samples at the screen edge
const d = distance( sampleUv, vec2( 0.5 ) );
const weight = pow( d.oneMinus(), this.ghostAttenuationFactorNode );
// accumulate
let sample = this.textureNode.sample( sampleUv ).rgb;
sample = max( sample.sub( this.thresholdNode ), vec3( 0 ) ).mul( this.ghostTintNode );
result.addAssign( sample.mul( weight ) );
} );
return result;
} );
this._material.fragmentNode = lensflare().context( builder.getSharedContext() );
this._material.needsUpdate = true;
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._renderTarget.dispose();
this._material.dispose();
}
}
export default LensflareNode;
/**
* TSL function for creating a bloom-based lens flare effect.
*
* @tsl
* @function
* @param {TextureNode} node - The node that represents the scene's bloom.
* @param {Object} params - The parameter object for configuring the effect.
* @param {Node<vec3> | Color} [params.ghostTint=vec3(1, 1, 1)] - Defines the tint of the flare/ghosts.
* @param {Node<float> | number} [params.threshold=float(0.5)] - Controls the size and strength of the effect. A higher threshold results in smaller flares.
* @param {Node<float> | number} [params.ghostSamples=float(4)] - Represents the number of flares/ghosts per bright spot which pivot around the center.
* @param {Node<float> | number} [params.ghostSpacing=float(0.25)] - Defines the spacing of the flares/ghosts.
* @param {Node<float> | number} [params.ghostAttenuationFactor=float(25)] - Defines the attenuation factor of flares/ghosts.
* @param {number} [params.downSampleRatio=4] - Defines the downsample ratio, since the effect is usually not rendered at full resolution.
* @returns {LensflareNode}
*/
export const lensflare = ( node, params ) => nodeObject( new LensflareNode( convertToTexture( node ), params ) );
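
Usage sketch (not part of the original file): as noted above, the effect expects the scene's bloom as input, so a bloom pass is extracted first. `scene`, `camera` and `renderer` are assumed to exist; the bloom() helper is assumed to come from 'three/addons/tsl/display/BloomNode.js'.

import { PostProcessing } from 'three/webgpu';
import { pass } from 'three/tsl';
import { bloom } from 'three/addons/tsl/display/BloomNode.js';
import { lensflare } from 'three/addons/tsl/display/LensflareNode.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );
const scenePassColor = scenePass.getTextureNode();

const bloomPass = bloom( scenePassColor ); // extract bright spots
const lensflarePass = lensflare( bloomPass, { threshold: 0.2, ghostSamples: 6 } );

postProcessing.outputNode = scenePassColor.add( bloomPass ).add( lensflarePass );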

node_modules/three/examples/jsm/tsl/display/Lut3DNode.js

@@ -0,0 +1,109 @@
import { TempNode } from 'three/webgpu';
import { nodeObject, Fn, float, uniform, vec3, vec4, mix } from 'three/tsl';
/**
* A post processing node for color grading via lookup tables.
*
* @augments TempNode
* @three_import import { lut3D } from 'three/addons/tsl/display/Lut3DNode.js';
*/
class Lut3DNode extends TempNode {
static get type() {
return 'Lut3DNode';
}
/**
* Constructs a new LUT node.
*
* @param {Node} inputNode - The node that represents the input of the effect.
* @param {TextureNode} lutNode - A texture node that represents the lookup table.
* @param {number} size - The size of the lookup table.
* @param {Node<float>} intensityNode - Controls the intensity of the effect.
*/
constructor( inputNode, lutNode, size, intensityNode ) {
super( 'vec4' );
/**
* The node that represents the input of the effect.
*
* @type {Node}
*/
this.inputNode = inputNode;
/**
* A texture node that represents the lookup table.
*
* @type {TextureNode}
*/
this.lutNode = lutNode;
/**
* The size of the lookup table.
*
* @type {UniformNode<float>}
*/
this.size = uniform( size );
/**
* Controls the intensity of the effect.
*
* @type {Node<float>}
*/
this.intensityNode = intensityNode;
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup() {
const { inputNode, lutNode } = this;
const sampleLut = ( uv ) => lutNode.sample( uv );
const lut3D = Fn( () => {
const base = inputNode;
// pull the sample in by half a pixel so the sample begins at the center of the edge pixels.
const pixelWidth = float( 1.0 ).div( this.size );
const halfPixelWidth = float( 0.5 ).div( this.size );
const uvw = vec3( halfPixelWidth ).add( base.rgb.mul( float( 1.0 ).sub( pixelWidth ) ) );
const lutValue = vec4( sampleLut( uvw ).rgb, base.a );
return vec4( mix( base, lutValue, this.intensityNode ) );
} );
const outputNode = lut3D();
return outputNode;
}
}
export default Lut3DNode;
/**
* TSL function for creating a LUT node for color grading via post processing.
*
* @tsl
* @function
* @param {Node} node - The node that represents the input of the effect.
* @param {TextureNode} lut - A texture node that represents the lookup table.
* @param {number} size - The size of the lookup table.
* @param {Node<float> | number} intensity - Controls the intensity of the effect.
* @returns {Lut3DNode}
*/
export const lut3D = ( node, lut, size, intensity ) => nodeObject( new Lut3DNode( nodeObject( node ), nodeObject( lut ), size, nodeObject( intensity ) ) );
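
Usage sketch (not part of the original file): color grading a scene pass with a .CUBE lookup table. `scene`, `camera` and `renderer` are assumed to exist; LUTCubeLoader, its `texture3D` result field, and the example LUT path are assumptions based on the stock three.js addons.

import { PostProcessing } from 'three/webgpu';
import { pass, texture3D } from 'three/tsl';
import { lut3D } from 'three/addons/tsl/display/Lut3DNode.js';
import { LUTCubeLoader } from 'three/addons/loaders/LUTCubeLoader.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );

const lut = await new LUTCubeLoader().loadAsync( 'luts/Bourbon 64.CUBE' );

// grade at full intensity; lower the last argument to blend with the ungraded color
postProcessing.outputNode = lut3D( scenePass, texture3D( lut.texture3D ), lut.texture3D.image.width, 1 );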

node_modules/three/examples/jsm/tsl/display/MotionBlur.js

@@ -0,0 +1,33 @@
import { Fn, float, uv, Loop, int } from 'three/tsl';
/**
* Applies a motion blur effect to the given input node.
*
* @tsl
* @function
* @param {Node<vec4>} inputNode - The input node to apply the motion blur for.
* @param {Node<vec2>} velocity - The motion vectors of the beauty pass.
* @param {Node<int>} [numSamples=int(16)] - How many samples the effect should use. A higher value results in better quality but is also more expensive.
* @return {Node<vec4>} The input node with the motion blur effect applied.
*/
export const motionBlur = /*@__PURE__*/ Fn( ( [ inputNode, velocity, numSamples = int( 16 ) ] ) => {
const sampleColor = ( uv ) => inputNode.sample( uv );
const uvs = uv();
const colorResult = sampleColor( uvs ).toVar();
const fSamples = float( numSamples );
Loop( { start: int( 1 ), end: numSamples, type: 'int', condition: '<=' }, ( { i } ) => {
const offset = velocity.mul( float( i ).div( fSamples.sub( 1 ) ).sub( 0.5 ) );
colorResult.addAssign( sampleColor( uvs.add( offset ) ) );
} );
colorResult.divAssign( fSamples );
return colorResult;
} );
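
Usage sketch (not part of the original file): the velocity input is typically produced by rendering the scene pass with MRT. `scene`, `camera` and `renderer` are assumed to exist; the `output` and `velocity` nodes are assumed to come from 'three/tsl' as in the stock three.js MRT setup.

import { PostProcessing } from 'three/webgpu';
import { pass, mrt, output, velocity } from 'three/tsl';
import { motionBlur } from 'three/addons/tsl/display/MotionBlur.js';

const postProcessing = new PostProcessing( renderer );

const scenePass = pass( scene, camera );
scenePass.setMRT( mrt( { output, velocity } ) ); // write color and motion vectors

const scenePassColor = scenePass.getTextureNode( 'output' );
const scenePassVelocity = scenePass.getTextureNode( 'velocity' );

postProcessing.outputNode = motionBlur( scenePassColor, scenePassVelocity );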

node_modules/three/examples/jsm/tsl/display/OutlineNode.js

@@ -0,0 +1,751 @@
import { DepthTexture, FloatType, RenderTarget, Vector2, TempNode, QuadMesh, NodeMaterial, RendererUtils, NodeUpdateType } from 'three/webgpu';
import { Loop, int, exp, min, float, mul, uv, vec2, vec3, Fn, textureSize, orthographicDepthToViewZ, screenUV, nodeObject, uniform, vec4, passTexture, texture, perspectiveDepthToViewZ, positionView, reference } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
const _size = /*@__PURE__*/ new Vector2();
const _BLUR_DIRECTION_X = /*@__PURE__*/ new Vector2( 1.0, 0.0 );
const _BLUR_DIRECTION_Y = /*@__PURE__*/ new Vector2( 0.0, 1.0 );
let _rendererState;
/**
* Post processing node for rendering outlines around selected objects. The node
* gives you great flexibility in composing the final outline look depending on
* your requirements.
* ```js
* const postProcessing = new THREE.PostProcessing( renderer );
*
* const scenePass = pass( scene, camera );
*
* // outline parameter
*
* const edgeStrength = uniform( 3.0 );
* const edgeGlow = uniform( 0.0 );
* const edgeThickness = uniform( 1.0 );
* const visibleEdgeColor = uniform( new THREE.Color( 0xffffff ) );
* const hiddenEdgeColor = uniform( new THREE.Color( 0x4e3636 ) );
*
* outlinePass = outline( scene, camera, {
* selectedObjects,
* edgeGlow,
* edgeThickness
* } );
*
* // compose custom outline
*
* const { visibleEdge, hiddenEdge } = outlinePass;
* const outlineColor = visibleEdge.mul( visibleEdgeColor ).add( hiddenEdge.mul( hiddenEdgeColor ) ).mul( edgeStrength );
*
* postProcessing.outputNode = outlineColor.add( scenePass );
* ```
*
* @augments TempNode
* @three_import import { outline } from 'three/addons/tsl/display/OutlineNode.js';
*/
class OutlineNode extends TempNode {
static get type() {
return 'OutlineNode';
}
/**
* Constructs a new outline node.
*
* @param {Scene} scene - A reference to the scene.
* @param {Camera} camera - The camera the scene is rendered with.
* @param {Object} params - The configuration parameters.
* @param {Array<Object3D>} [params.selectedObjects] - An array of selected objects.
* @param {Node<float>} [params.edgeThickness=float(1)] - The thickness of the edges.
* @param {Node<float>} [params.edgeGlow=float(0)] - Can be used for an animated glow/pulse effects.
* @param {number} [params.downSampleRatio=2] - The downsample ratio.
*/
constructor( scene, camera, params = {} ) {
super( 'vec4' );
const {
selectedObjects = [],
edgeThickness = float( 1 ),
edgeGlow = float( 0 ),
downSampleRatio = 2
} = params;
/**
* A reference to the scene.
*
* @type {Scene}
*/
this.scene = scene;
/**
* The camera the scene is rendered with.
*
* @type {Camera}
*/
this.camera = camera;
/**
* An array of selected objects.
*
* @type {Array<Object3D>}
*/
this.selectedObjects = selectedObjects;
/**
* The thickness of the edges.
*
* @type {Node<float>}
*/
this.edgeThicknessNode = nodeObject( edgeThickness );
/**
* Can be used for an animated glow/pulse effect.
*
* @type {Node<float>}
*/
this.edgeGlowNode = nodeObject( edgeGlow );
/**
* The downsample ratio.
*
* @type {number}
* @default 2
*/
this.downSampleRatio = downSampleRatio;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
// render targets
/**
* The render target for the depth pre-pass.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetDepthBuffer = new RenderTarget();
this._renderTargetDepthBuffer.depthTexture = new DepthTexture();
this._renderTargetDepthBuffer.depthTexture.type = FloatType;
/**
* The render target for the mask pass.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetMaskBuffer = new RenderTarget();
/**
* The render target for the mask downsample.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetMaskDownSampleBuffer = new RenderTarget( 1, 1, { depthBuffer: false } );
/**
* The first render target for the edge detection.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetEdgeBuffer1 = new RenderTarget( 1, 1, { depthBuffer: false } );
/**
* The second render target for the edge detection.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetEdgeBuffer2 = new RenderTarget( 1, 1, { depthBuffer: false } );
/**
* The first render target for the blur pass.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetBlurBuffer1 = new RenderTarget( 1, 1, { depthBuffer: false } );
/**
* The second render target for the blur pass.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetBlurBuffer2 = new RenderTarget( 1, 1, { depthBuffer: false } );
/**
* The render target for the final composite.
*
* @private
* @type {RenderTarget}
*/
this._renderTargetComposite = new RenderTarget( 1, 1, { depthBuffer: false } );
// uniforms
/**
* Represents the near value of the scene's camera.
*
* @private
* @type {ReferenceNode<float>}
*/
this._cameraNear = reference( 'near', 'float', camera );
/**
* Represents the far value of the scene's camera.
*
* @private
* @type {ReferenceNode<float>}
*/
this._cameraFar = reference( 'far', 'float', camera );
/**
* Uniform that represents the blur direction of the pass.
*
* @private
* @type {UniformNode<vec2>}
*/
this._blurDirection = uniform( new Vector2() );
/**
* Texture node that holds the data from the depth pre-pass.
*
* @private
* @type {TextureNode}
*/
this._depthTextureUniform = texture( this._renderTargetDepthBuffer.depthTexture );
/**
* Texture node that holds the data from the mask pass.
*
* @private
* @type {TextureNode}
*/
this._maskTextureUniform = texture( this._renderTargetMaskBuffer.texture );
/**
* Texture node that holds the data from the mask downsample pass.
*
* @private
* @type {TextureNode}
*/
this._maskTextureDownsSampleUniform = texture( this._renderTargetMaskDownSampleBuffer.texture );
/**
* Texture node that holds the data from the first edge detection pass.
*
* @private
* @type {TextureNode}
*/
this._edge1TextureUniform = texture( this._renderTargetEdgeBuffer1.texture );
/**
* Texture node that holds the data from the second edge detection pass.
*
* @private
* @type {TextureNode}
*/
this._edge2TextureUniform = texture( this._renderTargetEdgeBuffer2.texture );
/**
* Texture node that holds the current blurred color data.
*
* @private
* @type {TextureNode}
*/
this._blurColorTextureUniform = texture( this._renderTargetEdgeBuffer1.texture );
// constants
/**
* Visible edge color.
*
* @private
* @type {Node<vec3>}
*/
this._visibleEdgeColor = vec3( 1, 0, 0 );
/**
* Hidden edge color.
*
* @private
* @type {Node<vec3>}
*/
this._hiddenEdgeColor = vec3( 0, 1, 0 );
// materials
/**
* The material for the depth pre-pass.
*
* @private
* @type {NodeMaterial}
*/
this._depthMaterial = new NodeMaterial();
this._depthMaterial.fragmentNode = vec4( 0, 0, 0, 1 );
this._depthMaterial.name = 'OutlineNode.depth';
/**
* The material for preparing the mask.
*
* @private
* @type {NodeMaterial}
*/
this._prepareMaskMaterial = new NodeMaterial();
this._prepareMaskMaterial.name = 'OutlineNode.prepareMask';
/**
* The copy material.
*
* @private
* @type {NodeMaterial}
*/
this._materialCopy = new NodeMaterial();
this._materialCopy.name = 'OutlineNode.copy';
/**
* The edge detection material.
*
* @private
* @type {NodeMaterial}
*/
this._edgeDetectionMaterial = new NodeMaterial();
this._edgeDetectionMaterial.name = 'OutlineNode.edgeDetection';
/**
* The material that is used to render the first, half-resolution blur pass.
*
* @private
* @type {NodeMaterial}
*/
this._separableBlurMaterial = new NodeMaterial();
this._separableBlurMaterial.name = 'OutlineNode.separableBlur';
/**
* The material that is used to render the second, quarter-resolution blur pass.
*
* @private
* @type {NodeMaterial}
*/
this._separableBlurMaterial2 = new NodeMaterial();
this._separableBlurMaterial2.name = 'OutlineNode.separableBlur2';
/**
* The final composite material.
*
* @private
* @type {NodeMaterial}
*/
this._compositeMaterial = new NodeMaterial();
this._compositeMaterial.name = 'OutlineNode.composite';
/**
* A set to cache selected objects in the scene.
*
* @private
* @type {Set<Object3D>}
*/
this._selectionCache = new Set();
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._renderTargetComposite.texture );
}
/**
* A mask value that represents the visible edge.
*
* @return {Node<float>} The visible edge.
*/
get visibleEdge() {
return this.r;
}
/**
* A mask value that represents the hidden edge.
*
* @return {Node<float>} The hidden edge.
*/
get hiddenEdge() {
return this.g;
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
this._renderTargetDepthBuffer.setSize( width, height );
this._renderTargetMaskBuffer.setSize( width, height );
this._renderTargetComposite.setSize( width, height );
// downsample 1
let resx = Math.round( width / this.downSampleRatio );
let resy = Math.round( height / this.downSampleRatio );
this._renderTargetMaskDownSampleBuffer.setSize( resx, resy );
this._renderTargetEdgeBuffer1.setSize( resx, resy );
this._renderTargetBlurBuffer1.setSize( resx, resy );
// downsample 2
resx = Math.round( resx / 2 );
resy = Math.round( resy / 2 );
this._renderTargetEdgeBuffer2.setSize( resx, resy );
this._renderTargetBlurBuffer2.setSize( resx, resy );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
const { camera, scene } = this;
_rendererState = RendererUtils.resetRendererAndSceneState( renderer, scene, _rendererState );
//
const size = renderer.getDrawingBufferSize( _size );
this.setSize( size.width, size.height );
//
renderer.setClearColor( 0xffffff, 1 );
this._updateSelectionCache();
// 1. Draw non-selected objects in the depth buffer
scene.overrideMaterial = this._depthMaterial;
renderer.setRenderTarget( this._renderTargetDepthBuffer );
renderer.setRenderObjectFunction( ( object, ...params ) => {
if ( this._selectionCache.has( object ) === false ) {
renderer.renderObject( object, ...params );
}
} );
renderer.render( scene, camera );
// 2. Draw only the selected objects by comparing the depth buffer of non-selected objects
scene.overrideMaterial = this._prepareMaskMaterial;
renderer.setRenderTarget( this._renderTargetMaskBuffer );
renderer.setRenderObjectFunction( ( object, ...params ) => {
if ( this._selectionCache.has( object ) === true ) {
renderer.renderObject( object, ...params );
}
} );
renderer.render( scene, camera );
//
renderer.setRenderObjectFunction( _rendererState.renderObjectFunction );
this._selectionCache.clear();
// 3. Downsample to (at least) half resolution
_quadMesh.material = this._materialCopy;
renderer.setRenderTarget( this._renderTargetMaskDownSampleBuffer );
_quadMesh.render( renderer );
// 4. Perform edge detection (half resolution)
_quadMesh.material = this._edgeDetectionMaterial;
renderer.setRenderTarget( this._renderTargetEdgeBuffer1 );
_quadMesh.render( renderer );
// 5. Apply blur (half resolution)
this._blurColorTextureUniform.value = this._renderTargetEdgeBuffer1.texture;
this._blurDirection.value.copy( _BLUR_DIRECTION_X );
_quadMesh.material = this._separableBlurMaterial;
renderer.setRenderTarget( this._renderTargetBlurBuffer1 );
_quadMesh.render( renderer );
this._blurColorTextureUniform.value = this._renderTargetBlurBuffer1.texture;
this._blurDirection.value.copy( _BLUR_DIRECTION_Y );
renderer.setRenderTarget( this._renderTargetEdgeBuffer1 );
_quadMesh.render( renderer );
// 6. Apply blur (quarter resolution)
this._blurColorTextureUniform.value = this._renderTargetEdgeBuffer1.texture;
this._blurDirection.value.copy( _BLUR_DIRECTION_X );
_quadMesh.material = this._separableBlurMaterial2;
renderer.setRenderTarget( this._renderTargetBlurBuffer2 );
_quadMesh.render( renderer );
this._blurColorTextureUniform.value = this._renderTargetBlurBuffer2.texture;
this._blurDirection.value.copy( _BLUR_DIRECTION_Y );
renderer.setRenderTarget( this._renderTargetEdgeBuffer2 );
_quadMesh.render( renderer );
// 7. Composite
_quadMesh.material = this._compositeMaterial;
renderer.setRenderTarget( this._renderTargetComposite );
_quadMesh.render( renderer );
// restore
RendererUtils.restoreRendererAndSceneState( renderer, scene, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup() {
// prepare mask material
const prepareMask = () => {
const depth = this._depthTextureUniform.sample( screenUV );
let viewZNode;
if ( this.camera.isPerspectiveCamera ) {
viewZNode = perspectiveDepthToViewZ( depth, this._cameraNear, this._cameraFar );
} else {
viewZNode = orthographicDepthToViewZ( depth, this._cameraNear, this._cameraFar );
}
const depthTest = positionView.z.lessThanEqual( viewZNode ).select( 1, 0 );
return vec4( 0.0, depthTest, 1.0, 1.0 );
};
this._prepareMaskMaterial.fragmentNode = prepareMask();
this._prepareMaskMaterial.needsUpdate = true;
// copy material
this._materialCopy.fragmentNode = this._maskTextureUniform;
this._materialCopy.needsUpdate = true;
// edge detection material
const edgeDetection = Fn( () => {
const resolution = textureSize( this._maskTextureDownSampleUniform );
const invSize = vec2( 1 ).div( resolution ).toVar();
const uvOffset = vec4( 1.0, 0.0, 0.0, 1.0 ).mul( vec4( invSize, invSize ) );
const uvNode = uv();
const c1 = this._maskTextureDownSampleUniform.sample( uvNode.add( uvOffset.xy ) ).toVar();
const c2 = this._maskTextureDownSampleUniform.sample( uvNode.sub( uvOffset.xy ) ).toVar();
const c3 = this._maskTextureDownSampleUniform.sample( uvNode.add( uvOffset.yw ) ).toVar();
const c4 = this._maskTextureDownSampleUniform.sample( uvNode.sub( uvOffset.yw ) ).toVar();
const diff1 = mul( c1.r.sub( c2.r ), 0.5 );
const diff2 = mul( c3.r.sub( c4.r ), 0.5 );
const d = vec2( diff1, diff2 ).length();
const a1 = min( c1.g, c2.g );
const a2 = min( c3.g, c4.g );
const visibilityFactor = min( a1, a2 );
const edgeColor = visibilityFactor.oneMinus().greaterThan( 0.001 ).select( this._visibleEdgeColor, this._hiddenEdgeColor );
return vec4( edgeColor, 1 ).mul( d );
} );
this._edgeDetectionMaterial.fragmentNode = edgeDetection();
this._edgeDetectionMaterial.needsUpdate = true;
// separable blur material
const MAX_RADIUS = 4;
const gaussianPdf = Fn( ( [ x, sigma ] ) => {
return float( 0.39894 ).mul( exp( float( - 0.5 ).mul( x ).mul( x ).div( sigma.mul( sigma ) ) ).div( sigma ) );
} );
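// Note (added for clarity): the constant 0.39894 approximates 1 / sqrt( 2 * PI ),
// the normalization factor of the one-dimensional Gaussian probability density.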
const separableBlur = Fn( ( [ kernelRadius ] ) => {
const resolution = textureSize( this._maskTextureDownSampleUniform );
const invSize = vec2( 1 ).div( resolution ).toVar();
const uvNode = uv();
const sigma = kernelRadius.div( 2 ).toVar();
const weightSum = gaussianPdf( 0, sigma ).toVar();
const diffuseSum = this._blurColorTextureUniform.sample( uvNode ).mul( weightSum ).toVar();
const delta = this._blurDirection.mul( invSize ).mul( kernelRadius ).div( MAX_RADIUS ).toVar();
const uvOffset = delta.toVar();
Loop( { start: int( 1 ), end: int( MAX_RADIUS ), type: 'int', condition: '<=' }, ( { i } ) => {
const x = kernelRadius.mul( float( i ) ).div( MAX_RADIUS );
const w = gaussianPdf( x, sigma );
const sample1 = this._blurColorTextureUniform.sample( uvNode.add( uvOffset ) );
const sample2 = this._blurColorTextureUniform.sample( uvNode.sub( uvOffset ) );
diffuseSum.addAssign( sample1.add( sample2 ).mul( w ) );
weightSum.addAssign( w.mul( 2 ) );
uvOffset.addAssign( delta );
} );
return diffuseSum.div( weightSum );
} );
this._separableBlurMaterial.fragmentNode = separableBlur( this.edgeThicknessNode );
this._separableBlurMaterial.needsUpdate = true;
this._separableBlurMaterial2.fragmentNode = separableBlur( MAX_RADIUS );
this._separableBlurMaterial2.needsUpdate = true;
// composite material
const composite = Fn( () => {
const edgeValue1 = this._edge1TextureUniform;
const edgeValue2 = this._edge2TextureUniform;
const maskColor = this._maskTextureUniform;
const edgeValue = edgeValue1.add( edgeValue2.mul( this.edgeGlowNode ) );
return maskColor.r.mul( edgeValue );
} );
this._compositeMaterial.fragmentNode = composite();
this._compositeMaterial.needsUpdate = true;
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this.selectedObjects.length = 0;
this._renderTargetDepthBuffer.dispose();
this._renderTargetMaskBuffer.dispose();
this._renderTargetMaskDownSampleBuffer.dispose();
this._renderTargetEdgeBuffer1.dispose();
this._renderTargetEdgeBuffer2.dispose();
this._renderTargetBlurBuffer1.dispose();
this._renderTargetBlurBuffer2.dispose();
this._renderTargetComposite.dispose();
this._depthMaterial.dispose();
this._prepareMaskMaterial.dispose();
this._materialCopy.dispose();
this._edgeDetectionMaterial.dispose();
this._separableBlurMaterial.dispose();
this._separableBlurMaterial2.dispose();
this._compositeMaterial.dispose();
}
/**
* Updates the selection cache based on the selected objects.
*
* @private
*/
_updateSelectionCache() {
for ( let i = 0; i < this.selectedObjects.length; i ++ ) {
const selectedObject = this.selectedObjects[ i ];
selectedObject.traverse( ( object ) => {
if ( object.isMesh ) this._selectionCache.add( object );
} );
}
}
}
export default OutlineNode;
/**
* TSL function for creating an outline effect around selected objects.
*
* @tsl
* @function
* @param {Scene} scene - A reference to the scene.
* @param {Camera} camera - The camera the scene is rendered with.
* @param {Object} params - The configuration parameters.
* @param {Array<Object3D>} [params.selectedObjects] - An array of selected objects.
* @param {Node<float>} [params.edgeThickness=float(1)] - The thickness of the edges.
* @param {Node<float>} [params.edgeGlow=float(0)] - Can be used for animated glow/pulse effects.
* @param {number} [params.downSampleRatio=2] - The downsample ratio.
* @returns {OutlineNode}
*/
export const outline = ( scene, camera, params ) => nodeObject( new OutlineNode( scene, camera, params ) );
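// Example usage (illustrative sketch, not part of the upstream file): combines
// the outline with a beauty pass. `renderer`, `scene`, `camera` and the
// selected `mesh` are assumed to exist and are hypothetical names.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, color } from 'three/tsl';
//
// const scenePass = pass( scene, camera );
// const outlinePass = outline( scene, camera, { selectedObjects: [ mesh ] } );
//
// // visibleEdge/hiddenEdge are the getters defined above (mask r/g channels)
// const edges = outlinePass.visibleEdge.mul( color( 0xffffff ) )
//     .add( outlinePass.hiddenEdge.mul( color( 0x4e3636 ) ) );
//
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = scenePass.add( edges );
// // per frame: postProcessing.render();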

View File

@@ -0,0 +1,89 @@
import { NodeMaterial } from 'three/webgpu';
import { nodeObject, Fn, vec4, uv, If, mod, screenCoordinate } from 'three/tsl';
import StereoCompositePassNode from './StereoCompositePassNode.js';
/**
* A render pass node that creates a parallax barrier effect.
*
* @augments StereoCompositePassNode
* @three_import import { parallaxBarrierPass } from 'three/addons/tsl/display/ParallaxBarrierPassNode.js';
*/
class ParallaxBarrierPassNode extends StereoCompositePassNode {
static get type() {
return 'ParallaxBarrierPassNode';
}
/**
* Constructs a new parallax barrier pass node.
*
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
*/
constructor( scene, camera ) {
super( scene, camera );
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isParallaxBarrierPassNode = true;
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const uvNode = uv();
const parallaxBarrier = Fn( () => {
const color = vec4().toVar();
If( mod( screenCoordinate.y, 2 ).greaterThan( 1 ), () => {
color.assign( this._mapLeft.sample( uvNode ) );
} ).Else( () => {
color.assign( this._mapRight.sample( uvNode ) );
} );
return color;
} );
const material = this._material || ( this._material = new NodeMaterial() );
material.fragmentNode = parallaxBarrier().context( builder.getSharedContext() );
material.needsUpdate = true;
return super.setup( builder );
}
}
export default ParallaxBarrierPassNode;
/**
* TSL function for creating a parallax barrier pass node.
*
* @tsl
* @function
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
* @returns {ParallaxBarrierPassNode}
*/
export const parallaxBarrierPass = ( scene, camera ) => nodeObject( new ParallaxBarrierPassNode( scene, camera ) );
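// Example usage (illustrative sketch, not part of the upstream file):
// `renderer`, `scene` and `camera` are assumed to exist.
//
// import { PostProcessing } from 'three/webgpu';
//
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = parallaxBarrierPass( scene, camera );
// // per frame: postProcessing.render();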

View File

@@ -0,0 +1,334 @@
import { NearestFilter, Vector4, TempNode, NodeUpdateType, PassNode } from 'three/webgpu';
import { nodeObject, Fn, float, uv, uniform, convertToTexture, vec2, vec3, clamp, floor, dot, smoothstep, If, sign, step, mrt, output, normalView, property } from 'three/tsl';
/**
* An inner node definition that implements the actual pixelation TSL code.
*
* @inner
* @augments TempNode
*/
class PixelationNode extends TempNode {
static get type() {
return 'PixelationNode';
}
/**
* Constructs a new pixelation node.
*
* @param {TextureNode} textureNode - The texture node that represents the beauty pass.
* @param {TextureNode} depthNode - The texture that represents the beauty's depth.
* @param {TextureNode} normalNode - The texture that represents the beauty's normals.
* @param {Node<float>} pixelSize - The pixel size.
* @param {Node<float>} normalEdgeStrength - The normal edge strength.
* @param {Node<float>} depthEdgeStrength - The depth edge strength.
*/
constructor( textureNode, depthNode, normalNode, pixelSize, normalEdgeStrength, depthEdgeStrength ) {
super( 'vec4' );
/**
* The texture node that represents the beauty pass.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The texture that represents the beauty's depth.
*
* @type {TextureNode}
*/
this.depthNode = depthNode;
/**
* The texture that represents the beauty's normals.
*
* @type {TextureNode}
*/
this.normalNode = normalNode;
/**
* The pixel size.
*
* @type {Node<float>}
*/
this.pixelSize = pixelSize;
/**
* The normal edge strength.
*
* @type {Node<float>}
*/
this.normalEdgeStrength = normalEdgeStrength;
/**
* The depth edge strength.
*
* @type {Node<float>}
*/
this.depthEdgeStrength = depthEdgeStrength;
/**
* Uniform node that represents the resolution.
*
* @type {UniformNode<vec4>}
*/
this._resolution = uniform( new Vector4() );
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node updates
* its internal uniforms once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
}
/**
* This method is used to update uniforms once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore() {
const map = this.textureNode.value;
const width = map.image.width;
const height = map.image.height;
this._resolution.value.set( width, height, 1 / width, 1 / height );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup() {
const { textureNode, depthNode, normalNode } = this;
const uvNodeTexture = textureNode.uvNode || uv();
const uvNodeDepth = depthNode.uvNode || uv();
const uvNodeNormal = normalNode.uvNode || uv();
const sampleTexture = () => textureNode.sample( uvNodeTexture );
const sampleDepth = ( x, y ) => depthNode.sample( uvNodeDepth.add( vec2( x, y ).mul( this._resolution.zw ) ) ).r;
const sampleNormal = ( x, y ) => normalNode.sample( uvNodeNormal.add( vec2( x, y ).mul( this._resolution.zw ) ) ).rgb.normalize();
const depthEdgeIndicator = ( depth ) => {
const diff = property( 'float', 'diff' );
diff.addAssign( clamp( sampleDepth( 1, 0 ).sub( depth ) ) );
diff.addAssign( clamp( sampleDepth( - 1, 0 ).sub( depth ) ) );
diff.addAssign( clamp( sampleDepth( 0, 1 ).sub( depth ) ) );
diff.addAssign( clamp( sampleDepth( 0, - 1 ).sub( depth ) ) );
return floor( smoothstep( 0.01, 0.02, diff ).mul( 2 ) ).div( 2 );
};
const neighborNormalEdgeIndicator = ( x, y, depth, normal ) => {
const depthDiff = sampleDepth( x, y ).sub( depth );
const neighborNormal = sampleNormal( x, y );
// Edge pixels should yield to faces whose normals are closer to the bias normal.
const normalEdgeBias = vec3( 1, 1, 1 ); // This should probably be a parameter.
const normalDiff = dot( normal.sub( neighborNormal ), normalEdgeBias );
const normalIndicator = clamp( smoothstep( - 0.01, 0.01, normalDiff ), 0.0, 1.0 );
// Only the shallower pixel should detect the normal edge.
const depthIndicator = clamp( sign( depthDiff.mul( .25 ).add( .0025 ) ), 0.0, 1.0 );
return float( 1.0 ).sub( dot( normal, neighborNormal ) ).mul( depthIndicator ).mul( normalIndicator );
};
const normalEdgeIndicator = ( depth, normal ) => {
const indicator = property( 'float', 'indicator' );
indicator.addAssign( neighborNormalEdgeIndicator( 0, - 1, depth, normal ) );
indicator.addAssign( neighborNormalEdgeIndicator( 0, 1, depth, normal ) );
indicator.addAssign( neighborNormalEdgeIndicator( - 1, 0, depth, normal ) );
indicator.addAssign( neighborNormalEdgeIndicator( 1, 0, depth, normal ) );
return step( 0.1, indicator );
};
const pixelation = Fn( () => {
const texel = sampleTexture();
const depth = property( 'float', 'depth' );
const normal = property( 'vec3', 'normal' );
If( this.depthEdgeStrength.greaterThan( 0.0 ).or( this.normalEdgeStrength.greaterThan( 0.0 ) ), () => {
depth.assign( sampleDepth( 0, 0 ) );
normal.assign( sampleNormal( 0, 0 ) );
} );
const dei = property( 'float', 'dei' );
If( this.depthEdgeStrength.greaterThan( 0.0 ), () => {
dei.assign( depthEdgeIndicator( depth ) );
} );
const nei = property( 'float', 'nei' );
If( this.normalEdgeStrength.greaterThan( 0.0 ), () => {
nei.assign( normalEdgeIndicator( depth, normal ) );
} );
const strength = dei.greaterThan( 0 ).select( float( 1.0 ).sub( dei.mul( this.depthEdgeStrength ) ), nei.mul( this.normalEdgeStrength ).add( 1 ) );
return texel.mul( strength );
} );
const outputNode = pixelation();
return outputNode;
}
}
const pixelation = ( node, depthNode, normalNode, pixelSize = 6, normalEdgeStrength = 0.3, depthEdgeStrength = 0.4 ) => nodeObject( new PixelationNode( convertToTexture( node ), convertToTexture( depthNode ), convertToTexture( normalNode ), nodeObject( pixelSize ), nodeObject( normalEdgeStrength ), nodeObject( depthEdgeStrength ) ) );
/**
* A special render pass node that renders the scene with a pixelation effect.
*
* @augments PassNode
* @three_import import { pixelationPass } from 'three/addons/tsl/display/PixelationPassNode.js';
*/
class PixelationPassNode extends PassNode {
static get type() {
return 'PixelationPassNode';
}
/**
* Constructs a new pixelation pass node.
*
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
* @param {Node<float> | number} [pixelSize=6] - The pixel size.
* @param {Node<float> | number} [normalEdgeStrength=0.3] - The normal edge strength.
* @param {Node<float> | number} [depthEdgeStrength=0.4] - The depth edge strength.
*/
constructor( scene, camera, pixelSize = 6, normalEdgeStrength = 0.3, depthEdgeStrength = 0.4 ) {
super( PassNode.COLOR, scene, camera, { minFilter: NearestFilter, magFilter: NearestFilter } );
/**
* The pixel size.
*
* @type {number}
* @default 6
*/
this.pixelSize = pixelSize;
/**
* The normal edge strength.
*
* @type {number}
* @default 0.3
*/
this.normalEdgeStrength = normalEdgeStrength;
/**
* The depth edge strength.
*
* @type {number}
* @default 0.4
*/
this.depthEdgeStrength = depthEdgeStrength;
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isPixelationPassNode = true;
this._mrt = mrt( {
output: output,
normal: normalView
} );
}
/**
* Sets the size of the pass.
*
* @param {number} width - The width of the pass.
* @param {number} height - The height of the pass.
*/
setSize( width, height ) {
const pixelSize = this.pixelSize.value ? this.pixelSize.value : this.pixelSize;
const adjustedWidth = Math.floor( width / pixelSize );
const adjustedHeight = Math.floor( height / pixelSize );
super.setSize( adjustedWidth, adjustedHeight );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PixelationNode}
*/
setup() {
const color = super.getTextureNode( 'output' );
const depth = super.getTextureNode( 'depth' );
const normal = super.getTextureNode( 'normal' );
return pixelation( color, depth, normal, this.pixelSize, this.normalEdgeStrength, this.depthEdgeStrength );
}
}
/**
* TSL function for creating a pixelation render pass node for post processing.
*
* @tsl
* @function
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
* @param {Node<float> | number} [pixelSize=6] - The pixel size.
* @param {Node<float> | number} [normalEdgeStrength=0.3] - The normal edge strength.
* @param {Node<float> | number} [depthEdgeStrength=0.4] - The depth edge strength.
* @returns {PixelationPassNode}
*/
export const pixelationPass = ( scene, camera, pixelSize, normalEdgeStrength, depthEdgeStrength ) => nodeObject( new PixelationPassNode( scene, camera, pixelSize, normalEdgeStrength, depthEdgeStrength ) );
export default PixelationPassNode;
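// Example usage (illustrative sketch, not part of the upstream file): since the
// pass renders the scene itself, it can be assigned to the output directly.
// `renderer`, `scene` and `camera` are assumed to exist.
//
// import { PostProcessing } from 'three/webgpu';
//
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = pixelationPass( scene, camera, 6, 0.3, 0.4 );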

View File

@@ -0,0 +1,96 @@
import { TempNode } from 'three/webgpu';
import { nodeObject, Fn, uv, uniform, vec2, sin, cos, vec4, convertToTexture } from 'three/tsl';
/**
* Post processing node for shifting/splitting RGB color channels. The effect
* separates color channels and offsets them from each other.
*
* @augments TempNode
* @three_import import { rgbShift } from 'three/addons/tsl/display/RGBShiftNode.js';
*/
class RGBShiftNode extends TempNode {
static get type() {
return 'RGBShiftNode';
}
/**
* Constructs a new RGB shift node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
* @param {number} [amount=0.005] - The amount of the RGB shift.
* @param {number} [angle=0] - Defines the orientation in which colors are shifted.
*/
constructor( textureNode, amount = 0.005, angle = 0 ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The amount of the RGB shift.
*
* @type {UniformNode<float>}
*/
this.amount = uniform( amount );
/**
* Defines in which direction colors are shifted.
*
* @type {UniformNode<float>}
*/
this.angle = uniform( angle );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( /* builder */ ) {
const { textureNode } = this;
const uvNode = textureNode.uvNode || uv();
const sampleTexture = ( uv ) => textureNode.sample( uv );
const rgbShift = Fn( () => {
const offset = vec2( cos( this.angle ), sin( this.angle ) ).mul( this.amount );
const cr = sampleTexture( uvNode.add( offset ) );
const cga = sampleTexture( uvNode );
const cb = sampleTexture( uvNode.sub( offset ) );
return vec4( cr.r, cga.g, cb.b, cga.a );
} );
return rgbShift();
}
}
export default RGBShiftNode;
/**
* TSL function for creating a RGB shift or split effect for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @param {number} [amount=0.005] - The amount of the RGB shift.
* @param {number} [angle=0] - Defines in which direction colors are shifted.
* @returns {RGBShiftNode}
*/
export const rgbShift = ( node, amount, angle ) => nodeObject( new RGBShiftNode( convertToTexture( node ), amount, angle ) );
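// Example usage (illustrative sketch, not part of the upstream file):
// `renderer`, `scene` and `camera` are assumed to exist; the angle shifts
// the channels diagonally.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass } from 'three/tsl';
//
// const scenePass = pass( scene, camera );
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = rgbShift( scenePass, 0.005, Math.PI / 4 );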

768
node_modules/three/examples/jsm/tsl/display/SMAANode.js generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,358 @@
import { AdditiveBlending, Color, Vector2, RendererUtils, PassNode, QuadMesh, NodeMaterial } from 'three/webgpu';
import { nodeObject, uniform, mrt, texture, getTextureIndex, unpremultiplyAlpha } from 'three/tsl';
const _size = /*@__PURE__*/ new Vector2();
let _rendererState;
/**
* A special render pass node that renders the scene with SSAA (Supersampling Anti-Aliasing).
* This manual SSAA approach re-renders the scene once for each sample with camera jitter and accumulates the results.
*
* This node produces a high-quality anti-aliased output but is also extremely expensive because of
* its brute-force approach of re-rendering the entire scene multiple times.
*
* Reference: {@link https://en.wikipedia.org/wiki/Supersampling}
*
* @augments PassNode
* @three_import import { ssaaPass } from 'three/addons/tsl/display/SSAAPassNode.js';
*/
class SSAAPassNode extends PassNode {
static get type() {
return 'SSAAPassNode';
}
/**
* Constructs a new SSAA pass node.
*
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
*/
constructor( scene, camera ) {
super( PassNode.COLOR, scene, camera );
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isSSAAPassNode = true;
/**
* The sample level, specified as n, where the number of samples is 2^n.
* For example, sampleLevel = 4 yields 2^4 = 16 samples.
*
* @type {number}
* @default 4
*/
this.sampleLevel = 4;
/**
* Whether rounding errors should be mitigated or not.
*
* @type {boolean}
* @default true
*/
this.unbiased = true;
/**
* The clear color of the pass.
*
* @type {Color}
* @default 0x000000
*/
this.clearColor = new Color( 0x000000 );
/**
* The clear alpha of the pass.
*
* @type {number}
* @default 0
*/
this.clearAlpha = 0;
/**
* A uniform node representing the sample weight.
*
* @type {UniformNode<float>}
* @default 1
*/
this.sampleWeight = uniform( 1 );
/**
* Reference to the internal render target that holds the current sample.
*
* @private
* @type {?RenderTarget}
* @default null
*/
this._sampleRenderTarget = null;
/**
* Reference to the internal quad mesh.
*
* @private
* @type {QuadMesh}
*/
this._quadMesh = new QuadMesh();
}
/**
* This method is used to render the SSAA effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
const { scene, camera } = this;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
this._pixelRatio = renderer.getPixelRatio();
const size = renderer.getSize( _size );
this.setSize( size.width, size.height );
this._sampleRenderTarget.setSize( this.renderTarget.width, this.renderTarget.height );
//
this._cameraNear.value = camera.near;
this._cameraFar.value = camera.far;
renderer.setMRT( this.getMRT() );
renderer.autoClear = false;
const jitterOffsets = _JitterVectors[ Math.max( 0, Math.min( this.sampleLevel, 5 ) ) ];
const baseSampleWeight = 1.0 / jitterOffsets.length;
const roundingRange = 1 / 32;
const viewOffset = {
fullWidth: this.renderTarget.width,
fullHeight: this.renderTarget.height,
offsetX: 0,
offsetY: 0,
width: this.renderTarget.width,
height: this.renderTarget.height
};
const originalViewOffset = Object.assign( {}, camera.view );
if ( originalViewOffset.enabled ) Object.assign( viewOffset, originalViewOffset );
// render the scene multiple times, each slightly jitter offset from the last and accumulate the results.
for ( let i = 0; i < jitterOffsets.length; i ++ ) {
const jitterOffset = jitterOffsets[ i ];
if ( camera.setViewOffset ) {
camera.setViewOffset(
viewOffset.fullWidth, viewOffset.fullHeight,
viewOffset.offsetX + jitterOffset[ 0 ] * 0.0625, viewOffset.offsetY + jitterOffset[ 1 ] * 0.0625, // 0.0625 = 1 / 16
viewOffset.width, viewOffset.height
);
}
this.sampleWeight.value = baseSampleWeight;
if ( this.unbiased ) {
// the theory is that equal weights for each sample lead to an accumulation of rounding errors.
// The following equation varies the sampleWeight per sample so that it is uniformly distributed
// across a range of values whose rounding errors cancel each other out.
const uniformCenteredDistribution = ( - 0.5 + ( i + 0.5 ) / jitterOffsets.length );
this.sampleWeight.value += roundingRange * uniformCenteredDistribution;
}
renderer.setClearColor( this.clearColor, this.clearAlpha );
renderer.setRenderTarget( this._sampleRenderTarget );
renderer.clear();
renderer.render( scene, camera );
// accumulation
renderer.setRenderTarget( this.renderTarget );
if ( i === 0 ) {
renderer.setClearColor( 0x000000, 0.0 );
renderer.clear();
}
this._quadMesh.render( renderer );
}
renderer.copyTextureToTexture( this._sampleRenderTarget.depthTexture, this.renderTarget.depthTexture );
// restore
if ( camera.setViewOffset && originalViewOffset.enabled ) {
camera.setViewOffset(
originalViewOffset.fullWidth, originalViewOffset.fullHeight,
originalViewOffset.offsetX, originalViewOffset.offsetY,
originalViewOffset.width, originalViewOffset.height
);
} else if ( camera.clearViewOffset ) {
camera.clearViewOffset();
}
//
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's MRT configuration and quad mesh.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
if ( this._sampleRenderTarget === null ) {
this._sampleRenderTarget = this.renderTarget.clone();
}
let sampleTexture;
const passMRT = this.getMRT();
if ( passMRT !== null ) {
const outputs = {};
for ( const name in passMRT.outputNodes ) {
const index = getTextureIndex( this._sampleRenderTarget.textures, name );
if ( index >= 0 ) {
outputs[ name ] = texture( this._sampleRenderTarget.textures[ index ] ).mul( this.sampleWeight );
}
}
sampleTexture = mrt( outputs );
} else {
sampleTexture = texture( this._sampleRenderTarget.texture ).mul( this.sampleWeight );
}
this._quadMesh.material = new NodeMaterial();
this._quadMesh.material.fragmentNode = unpremultiplyAlpha( sampleTexture );
this._quadMesh.material.transparent = true;
this._quadMesh.material.depthTest = false;
this._quadMesh.material.depthWrite = false;
this._quadMesh.material.premultipliedAlpha = true;
this._quadMesh.material.blending = AdditiveBlending;
this._quadMesh.material.name = 'SSAA';
return super.setup( builder );
}
/**
* Frees internal resources. This method should be called
* when the pass is no longer required.
*/
dispose() {
super.dispose();
if ( this._sampleRenderTarget !== null ) {
this._sampleRenderTarget.dispose();
}
}
}
export default SSAAPassNode;
// These jitter vectors are specified in integers because it is easier.
// I am assuming a [-8,8) integer grid, but it needs to be mapped onto [-0.5,0.5)
// before being used, thus these integers need to be scaled by 1/16.
//
// Sample patterns reference: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476218%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
const _JitterVectors = [
[
[ 0, 0 ]
],
[
[ 4, 4 ], [ - 4, - 4 ]
],
[
[ - 2, - 6 ], [ 6, - 2 ], [ - 6, 2 ], [ 2, 6 ]
],
[
[ 1, - 3 ], [ - 1, 3 ], [ 5, 1 ], [ - 3, - 5 ],
[ - 5, 5 ], [ - 7, - 1 ], [ 3, 7 ], [ 7, - 7 ]
],
[
[ 1, 1 ], [ - 1, - 3 ], [ - 3, 2 ], [ 4, - 1 ],
[ - 5, - 2 ], [ 2, 5 ], [ 5, 3 ], [ 3, - 5 ],
[ - 2, 6 ], [ 0, - 7 ], [ - 4, - 6 ], [ - 6, 4 ],
[ - 8, 0 ], [ 7, - 4 ], [ 6, 7 ], [ - 7, - 8 ]
],
[
[ - 4, - 7 ], [ - 7, - 5 ], [ - 3, - 5 ], [ - 5, - 4 ],
[ - 1, - 4 ], [ - 2, - 2 ], [ - 6, - 1 ], [ - 4, 0 ],
[ - 7, 1 ], [ - 1, 2 ], [ - 6, 3 ], [ - 3, 3 ],
[ - 7, 6 ], [ - 3, 6 ], [ - 5, 7 ], [ - 1, 7 ],
[ 5, - 7 ], [ 1, - 6 ], [ 6, - 5 ], [ 4, - 4 ],
[ 2, - 3 ], [ 7, - 2 ], [ 1, - 1 ], [ 4, - 1 ],
[ 2, 1 ], [ 6, 2 ], [ 0, 4 ], [ 4, 4 ],
[ 2, 5 ], [ 7, 5 ], [ 5, 6 ], [ 3, 7 ]
]
];
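// Worked example of the mapping described above: at sampleLevel 1 the offsets
// are [ 4, 4 ] and [ - 4, - 4 ]. Scaled by 1 / 16 they become ( 0.25, 0.25 )
// and ( - 0.25, - 0.25 ), i.e. sub-pixel offsets within [ - 0.5, 0.5 ) that are
// fed to camera.setViewOffset() in updateBefore().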
/**
* TSL function for creating a SSAA pass node for Supersampling Anti-Aliasing.
*
* @tsl
* @function
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
* @returns {SSAAPassNode}
*/
export const ssaaPass = ( scene, camera ) => nodeObject( new SSAAPassNode( scene, camera ) );
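// Example usage (illustrative sketch, not part of the upstream file):
// `renderer`, `scene` and `camera` are assumed to exist.
//
// import { PostProcessing } from 'three/webgpu';
//
// const ssaa = ssaaPass( scene, camera );
// ssaa.sampleLevel = 4; // 2^4 = 16 jittered renders per frame
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = ssaa;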

654
node_modules/three/examples/jsm/tsl/display/SSRNode.js generated vendored Normal file
View File

@@ -0,0 +1,654 @@
import { HalfFloatType, RenderTarget, Vector2, RendererUtils, QuadMesh, TempNode, NodeMaterial, NodeUpdateType, LinearFilter, LinearMipmapLinearFilter } from 'three/webgpu';
import { texture, reference, viewZToPerspectiveDepth, logarithmicDepthToViewZ, getScreenPosition, getViewPosition, sqrt, mul, div, cross, float, Continue, Break, Loop, int, max, abs, sub, If, dot, reflect, normalize, screenCoordinate, nodeObject, Fn, passTexture, uv, uniform, perspectiveDepthToViewZ, orthographicDepthToViewZ, vec2, vec3, vec4 } from 'three/tsl';
import { boxBlur } from './boxBlur.js';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
const _size = /*@__PURE__*/ new Vector2();
let _rendererState;
/**
* Post processing node for computing screen space reflections (SSR).
*
* Reference: {@link https://lettier.github.io/3d-game-shaders-for-beginners/screen-space-reflection.html}
*
* @augments TempNode
* @three_import import { ssr } from 'three/addons/tsl/display/SSRNode.js';
*/
class SSRNode extends TempNode {
static get type() {
return 'SSRNode';
}
/**
* Constructs a new SSR node.
*
* @param {Node<vec4>} colorNode - The node that represents the beauty pass.
* @param {Node<float>} depthNode - A node that represents the beauty pass's depth.
* @param {Node<vec3>} normalNode - A node that represents the beauty pass's normals.
* @param {Node<float>} metalnessNode - A node that represents the beauty pass's metalness.
* @param {?Node<float>} [roughnessNode=null] - A node that represents the beauty pass's roughness.
* @param {?Camera} [camera=null] - The camera the scene is rendered with.
*/
constructor( colorNode, depthNode, normalNode, metalnessNode, roughnessNode = null, camera = null ) {
super( 'vec4' );
/**
* The node that represents the beauty pass.
*
* @type {Node<vec4>}
*/
this.colorNode = colorNode;
/**
* A node that represents the beauty pass's depth.
*
* @type {Node<float>}
*/
this.depthNode = depthNode;
/**
* A node that represents the beauty pass's normals.
*
* @type {Node<vec3>}
*/
this.normalNode = normalNode;
/**
* A node that represents the beauty pass's metalness.
*
* @type {Node<float>}
*/
this.metalnessNode = metalnessNode;
/**
* A node that represents the beauty pass's roughness. When set, the SSR
* reflections are blurred based on the roughness value. Blurring is a costly
* operation so pass `null` if you encounter performance issues on certain
* devices.
*
* @type {?Node<float>}
* @default null
*/
this.roughnessNode = roughnessNode;
/**
* The resolution scale. Valid values are in the range
* `[0,1]`. `1` means best quality but also results in
* more computational overhead. Setting to `0.5` means
* the effect is computed in half-resolution.
*
* @type {number}
* @default 1
*/
this.resolutionScale = 1;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* Controls how far a fragment can reflect. Increasing this value results in
* more computational overhead but also increases the reflection distance.
*
* @type {UniformNode<float>}
*/
this.maxDistance = uniform( 1 );
/**
* Controls the cutoff between what counts as a possible reflection hit and what does not.
*
* @type {UniformNode<float>}
*/
this.thickness = uniform( 0.1 );
/**
* Controls how the SSR reflections are blended with the beauty pass.
*
* @type {UniformNode<float>}
*/
this.opacity = uniform( 1 );
/**
* This parameter controls how detailed the raymarching process is.
* The value range is `[0,1]` where `1` means best quality (the maximum number
* of raymarching iterations/samples) and `0` means no samples at all.
*
* A quality of `0.5` is usually sufficient for most use cases. Try to keep
* this parameter as low as possible. Larger values result in noticeably more
* overhead.
*
* @type {UniformNode<float>}
*/
this.quality = uniform( 0.5 );
/**
* The quality of the blur. Must be an integer in the range `[1,3]`.
*
* @type {UniformNode<int>}
*/
this.blurQuality = uniform( 2 );
//
if ( camera === null ) {
if ( this.colorNode.passNode && this.colorNode.passNode.isPassNode === true ) {
camera = this.colorNode.passNode.camera;
} else {
throw new Error( 'THREE.TSL: No camera found. ssr() requires a camera.' );
}
}
/**
* The camera the scene is rendered with.
*
* @type {Camera}
*/
this.camera = camera;
/**
* The spread of the blur. Automatically set when generating mips.
*
* @private
* @type {UniformNode<int>}
*/
this._blurSpread = uniform( 1 );
/**
* Represents the projection matrix of the scene's camera.
*
* @private
* @type {UniformNode<mat4>}
*/
this._cameraProjectionMatrix = uniform( camera.projectionMatrix );
/**
* Represents the inverse projection matrix of the scene's camera.
*
* @private
* @type {UniformNode<mat4>}
*/
this._cameraProjectionMatrixInverse = uniform( camera.projectionMatrixInverse );
/**
* Represents the near value of the scene's camera.
*
* @private
* @type {ReferenceNode<float>}
*/
this._cameraNear = reference( 'near', 'float', camera );
/**
* Represents the far value of the scene's camera.
*
* @private
* @type {ReferenceNode<float>}
*/
this._cameraFar = reference( 'far', 'float', camera );
/**
* Whether the scene's camera is perspective or orthographic.
*
* @private
* @type {UniformNode<bool>}
*/
this._isPerspectiveCamera = uniform( camera.isPerspectiveCamera );
/**
* The resolution of the pass.
*
* @private
* @type {UniformNode<vec2>}
*/
this._resolution = uniform( new Vector2() );
/**
* The render target the SSR is rendered into.
*
* @private
* @type {RenderTarget}
*/
this._ssrRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._ssrRenderTarget.texture.name = 'SSRNode.SSR';
/**
* The render target for the blurred SSR reflections.
*
* @private
* @type {RenderTarget}
*/
this._blurRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType, minFilter: LinearMipmapLinearFilter, magFilter: LinearFilter } );
this._blurRenderTarget.texture.name = 'SSRNode.Blur';
this._blurRenderTarget.texture.mipmaps.push( {}, {}, {}, {}, {} );
/**
* The material that is used to render the effect.
*
* @private
* @type {NodeMaterial}
*/
this._ssrMaterial = new NodeMaterial();
this._ssrMaterial.name = 'SSRNode.SSR';
/**
* The blur material.
*
* @private
* @type {NodeMaterial}
*/
this._blurMaterial = new NodeMaterial();
this._blurMaterial.name = 'SSRNode.Blur';
/**
* The copy material.
*
* @private
* @type {NodeMaterial}
*/
this._copyMaterial = new NodeMaterial();
this._copyMaterial.name = 'SSRNode.Copy';
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._ssrRenderTarget.texture );
let blurredTextureNode = null;
if ( this.roughnessNode !== null ) {
const mips = this._blurRenderTarget.texture.mipmaps.length - 1;
const lod = float( this.roughnessNode ).mul( mips ).clamp( 0, mips );
blurredTextureNode = passTexture( this, this._blurRenderTarget.texture ).level( lod );
}
/**
* Holds the blurred SSR reflections.
*
* @private
* @type {?PassTextureNode}
*/
this._blurredTextureNode = blurredTextureNode;
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this.roughnessNode !== null ? this._blurredTextureNode : this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
width = Math.round( this.resolutionScale * width );
height = Math.round( this.resolutionScale * height );
this._resolution.value.set( width, height );
this._ssrRenderTarget.setSize( width, height );
this._blurRenderTarget.setSize( width, height );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
const ssrRenderTarget = this._ssrRenderTarget;
const blurRenderTarget = this._blurRenderTarget;
const size = renderer.getDrawingBufferSize( _size );
_quadMesh.material = this._ssrMaterial;
this.setSize( size.width, size.height );
// clear
renderer.setMRT( null );
renderer.setClearColor( 0x000000, 0 );
// ssr
renderer.setRenderTarget( ssrRenderTarget );
_quadMesh.render( renderer );
// blur (optional)
if ( this.roughnessNode !== null ) {
// blur mips but leave the base mip unblurred
for ( let i = 0; i < blurRenderTarget.texture.mipmaps.length; i ++ ) {
_quadMesh.material = ( i === 0 ) ? this._copyMaterial : this._blurMaterial;
this._blurSpread.value = i;
renderer.setRenderTarget( blurRenderTarget, 0, i );
_quadMesh.render( renderer );
}
}
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const uvNode = uv();
const pointToLineDistance = Fn( ( [ point, linePointA, linePointB ] ) => {
// https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
return cross( point.sub( linePointA ), point.sub( linePointB ) ).length().div( linePointB.sub( linePointA ).length() );
} );
const pointPlaneDistance = Fn( ( [ point, planePoint, planeNormal ] ) => {
// https://mathworld.wolfram.com/Point-PlaneDistance.html
// https://en.wikipedia.org/wiki/Plane_(geometry)
// http://paulbourke.net/geometry/pointlineplane/
const d = mul( planeNormal.x, planePoint.x ).add( mul( planeNormal.y, planePoint.y ) ).add( mul( planeNormal.z, planePoint.z ) ).negate().toVar();
const denominator = sqrt( mul( planeNormal.x, planeNormal.x ).add( mul( planeNormal.y, planeNormal.y ) ).add( mul( planeNormal.z, planeNormal.z ) ) ).toVar();
const distance = div( mul( planeNormal.x, point.x ).add( mul( planeNormal.y, point.y ) ).add( mul( planeNormal.z, point.z ) ).add( d ), denominator );
return distance;
} );
const getViewZ = Fn( ( [ depth ] ) => {
let viewZNode;
if ( this.camera.isPerspectiveCamera ) {
viewZNode = perspectiveDepthToViewZ( depth, this._cameraNear, this._cameraFar );
} else {
viewZNode = orthographicDepthToViewZ( depth, this._cameraNear, this._cameraFar );
}
return viewZNode;
} );
const sampleDepth = ( uv ) => {
const depth = this.depthNode.sample( uv ).r;
if ( builder.renderer.logarithmicDepthBuffer === true ) {
const viewZ = logarithmicDepthToViewZ( depth, this._cameraNear, this._cameraFar );
return viewZToPerspectiveDepth( viewZ, this._cameraNear, this._cameraFar );
}
return depth;
};
const ssr = Fn( () => {
const metalness = float( this.metalnessNode );
// fragments with no metalness do not reflect their environment
metalness.equal( 0.0 ).discard();
// compute some standard FX entities
const depth = sampleDepth( uvNode ).toVar();
const viewPosition = getViewPosition( uvNode, depth, this._cameraProjectionMatrixInverse ).toVar();
const viewNormal = this.normalNode.rgb.normalize().toVar();
// compute the direction from the position in view space to the camera
const viewIncidentDir = ( ( this.camera.isPerspectiveCamera ) ? normalize( viewPosition ) : vec3( 0, 0, - 1 ) ).toVar();
// compute the direction in which the light is reflected on the surface
const viewReflectDir = reflect( viewIncidentDir, viewNormal ).toVar();
// adapt maximum distance to the local geometry (see https://www.mathsisfun.com/algebra/vectors-dot-product.html)
const maxReflectRayLen = this.maxDistance.div( dot( viewIncidentDir.negate(), viewNormal ) ).toVar();
// compute the maximum point of the reflection ray in view space
const d1viewPosition = viewPosition.add( viewReflectDir.mul( maxReflectRayLen ) ).toVar();
// check if d1viewPosition lies behind the camera near plane
If( this._isPerspectiveCamera.and( d1viewPosition.z.greaterThan( this._cameraNear.negate() ) ), () => {
// if so, ensure d1viewPosition is clamped on the near plane.
// this prevents artifacts during the ray marching process
const t = sub( this._cameraNear.negate(), viewPosition.z ).div( viewReflectDir.z );
d1viewPosition.assign( viewPosition.add( viewReflectDir.mul( t ) ) );
} );
// d0 and d1 are the start and maximum points of the reflection ray in screen space
const d0 = screenCoordinate.xy.toVar();
const d1 = getScreenPosition( d1viewPosition, this._cameraProjectionMatrix ).mul( this._resolution ).toVar();
// below variables are used to control the raymarching process
// total length of the ray
const totalLen = d1.sub( d0 ).length().toVar();
// offset in x and y direction
const xLen = d1.x.sub( d0.x ).toVar();
const yLen = d1.y.sub( d0.y ).toVar();
// determine the larger delta
// The larger difference will help to determine how much to travel in the X and Y direction each iteration and
// how many iterations are needed to travel the entire ray
const totalStep = int( max( abs( xLen ), abs( yLen ) ).mul( this.quality.clamp() ) ).toConst();
// step sizes in the x and y directions
const xSpan = xLen.div( totalStep ).toVar();
const ySpan = yLen.div( totalStep ).toVar();
const output = vec4( 0 ).toVar();
// the actual ray marching loop
// starting from d0, the code gradually travels along the ray and looks for an intersection with the geometry.
// it does not exceed d1 (the maximum ray extend)
Loop( totalStep, ( { i } ) => {
// advance on the ray by computing a new position in screen coordinates
const xy = vec2( d0.x.add( xSpan.mul( float( i ) ) ), d0.y.add( ySpan.mul( float( i ) ) ) ).toVar();
// stop processing if the new position lies outside of the screen
If( xy.x.lessThan( 0 ).or( xy.x.greaterThan( this._resolution.x ) ).or( xy.y.lessThan( 0 ) ).or( xy.y.greaterThan( this._resolution.y ) ), () => {
Break();
} );
// compute new uv, depth and viewZ for the next fragment
const uvNode = xy.div( this._resolution );
const d = sampleDepth( uvNode ).toVar();
const vZ = getViewZ( d ).toVar();
const viewReflectRayZ = float( 0 ).toVar();
// normalized distance between the current position xy and the starting point d0
const s = xy.sub( d0 ).length().div( totalLen );
// depending on the camera type, we now compute the z-coordinate of the reflected ray at the current step in view space
If( this._isPerspectiveCamera, () => {
const recipVPZ = float( 1 ).div( viewPosition.z ).toVar();
viewReflectRayZ.assign( float( 1 ).div( recipVPZ.add( s.mul( float( 1 ).div( d1viewPosition.z ).sub( recipVPZ ) ) ) ) );
} ).Else( () => {
viewReflectRayZ.assign( viewPosition.z.add( s.mul( d1viewPosition.z.sub( viewPosition.z ) ) ) );
} );
// if viewReflectRayZ is less or equal than the real z-coordinate at this place, it potentially intersects the geometry
If( viewReflectRayZ.lessThanEqual( vZ ), () => {
// compute the distance of the new location to the ray in view space
// to clarify vP is the fragment's view position which is not an exact point on the ray
const vP = getViewPosition( uvNode, d, this._cameraProjectionMatrixInverse ).toVar();
const away = pointToLineDistance( vP, viewPosition, d1viewPosition ).toVar();
// compute the minimum thickness between the current fragment and its neighbor in the x-direction.
const xyNeighbor = vec2( xy.x.add( 1 ), xy.y ).toVar(); // move one pixel
const uvNeighbor = xyNeighbor.div( this._resolution );
const vPNeighbor = getViewPosition( uvNeighbor, d, this._cameraProjectionMatrixInverse ).toVar();
const minThickness = vPNeighbor.x.sub( vP.x ).toVar();
minThickness.mulAssign( 3 ); // expand a bit to avoid errors
const tk = max( minThickness, this.thickness ).toVar();
If( away.lessThanEqual( tk ), () => { // hit
const vN = this.normalNode.sample( uvNode ).rgb.normalize().toVar();
If( dot( viewReflectDir, vN ).greaterThanEqual( 0 ), () => {
// the reflected ray is pointing towards the same side as the fragment's normal (current ray position),
// which means it wouldn't reflect off the surface. The loop continues to the next step for the next ray sample.
Continue();
} );
// this distance represents the depth of the intersection point between the reflected ray and the scene.
const distance = pointPlaneDistance( vP, viewPosition, viewNormal ).toVar();
If( distance.greaterThan( this.maxDistance ), () => {
// Distance exceeding limit: The reflection is potentially too far away and
// might not contribute significantly to the final color
Break();
} );
const op = this.opacity.mul( metalness ).toVar();
// distance attenuation (the reflection should fade out the farther it is away from the surface)
const ratio = float( 1 ).sub( distance.div( this.maxDistance ) ).toVar();
const attenuation = ratio.mul( ratio );
op.mulAssign( attenuation );
// fresnel (reflect more light on surfaces that are viewed at grazing angles)
const fresnelCoe = div( dot( viewIncidentDir, viewReflectDir ).add( 1 ), 2 );
op.mulAssign( fresnelCoe );
// output
const reflectColor = this.colorNode.sample( uvNode );
output.assign( vec4( reflectColor.rgb, op ) );
Break();
} );
} );
} );
return output;
} );
this._ssrMaterial.fragmentNode = ssr().context( builder.getSharedContext() );
this._ssrMaterial.needsUpdate = true;
// below materials are used for blurring
const reflectionBuffer = texture( this._ssrRenderTarget.texture );
this._blurMaterial.fragmentNode = boxBlur( reflectionBuffer, { size: this.blurQuality, separation: this._blurSpread } );
this._blurMaterial.needsUpdate = true;
this._copyMaterial.fragmentNode = reflectionBuffer;
this._copyMaterial.needsUpdate = true;
//
return this.getTextureNode();
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._ssrRenderTarget.dispose();
this._blurRenderTarget.dispose();
this._ssrMaterial.dispose();
this._blurMaterial.dispose();
this._copyMaterial.dispose();
}
}
export default SSRNode;
/**
* TSL function for creating screen space reflections (SSR).
*
* @tsl
* @function
* @param {Node<vec4>} colorNode - The node that represents the beauty pass.
* @param {Node<float>} depthNode - A node that represents the beauty pass's depth.
* @param {Node<vec3>} normalNode - A node that represents the beauty pass's normals.
* @param {Node<float>} metalnessNode - A node that represents the beauty pass's metalness.
* @param {?Node<float>} [roughnessNode=null] - A node that represents the beauty pass's roughness.
* @param {?Camera} [camera=null] - The camera the scene is rendered with.
* @returns {SSRNode}
*/
export const ssr = ( colorNode, depthNode, normalNode, metalnessNode, roughnessNode = null, camera = null ) => nodeObject( new SSRNode( nodeObject( colorNode ), nodeObject( depthNode ), nodeObject( normalNode ), nodeObject( metalnessNode ), nodeObject( roughnessNode ), camera ) );
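// Example usage (illustrative sketch, not part of the upstream file): feeds the
// SSR node from an MRT-enabled scene pass. `renderer`, `scene` and `camera` are
// assumed to exist; the MRT layout mirrors the pattern of the official SSR
// example but may need adjustment for other setups.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, mrt, output, normalView, metalness } from 'three/tsl';
//
// const scenePass = pass( scene, camera );
// scenePass.setMRT( mrt( { output, normal: normalView, metalness } ) );
//
// const scenePassColor = scenePass.getTextureNode( 'output' );
// const scenePassDepth = scenePass.getTextureNode( 'depth' );
// const scenePassNormal = scenePass.getTextureNode( 'normal' );
// const scenePassMetalness = scenePass.getTextureNode( 'metalness' );
//
// const ssrPass = ssr( scenePassColor, scenePassDepth, scenePassNormal, scenePassMetalness );
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = scenePassColor.add( ssrPass );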

24
node_modules/three/examples/jsm/tsl/display/Sepia.js generated vendored Normal file
View File

@@ -0,0 +1,24 @@
import { dot, Fn, vec3, vec4 } from 'three/tsl';
/**
* Applies a sepia effect to the given color node.
*
* @tsl
* @function
* @param {Node<vec4>} color - The color node to apply the sepia for.
* @return {Node<vec4>} The updated color node.
*/
export const sepia = /*@__PURE__*/ Fn( ( [ color ] ) => {
const c = vec3( color );
// https://github.com/evanw/glfx.js/blob/master/src/filters/adjust/sepia.js
return vec4(
dot( c, vec3( 0.393, 0.769, 0.189 ) ),
dot( c, vec3( 0.349, 0.686, 0.168 ) ),
dot( c, vec3( 0.272, 0.534, 0.131 ) ),
color.a
);
} );
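// Example usage (illustrative sketch, not part of the upstream file): sepia()
// is a plain TSL function, so it can wrap any vec4 node directly.
// `renderer`, `scene` and `camera` are assumed to exist.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass } from 'three/tsl';
//
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = sepia( pass( scene, camera ) );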

View File

@@ -0,0 +1,168 @@
import { Vector2, TempNode, NodeUpdateType } from 'three/webgpu';
import { nodeObject, Fn, uv, uniform, convertToTexture, vec2, vec3, vec4, mat3, luminance, add } from 'three/tsl';
/**
* Post processing node for detecting edges with a sobel filter.
* A sobel filter should be applied after tone mapping and output color
* space conversion.
*
* @augments TempNode
* @three_import import { sobel } from 'three/addons/tsl/display/SobelOperatorNode.js';
*/
class SobelOperatorNode extends TempNode {
static get type() {
return 'SobelOperatorNode';
}
/**
* Constructs a new sobel operator node.
*
* @param {TextureNode} textureNode - The texture node that represents the input of the effect.
*/
constructor( textureNode ) {
super( 'vec4' );
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.textureNode = textureNode;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node updates
* its internal uniforms once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* A uniform node holding the inverse resolution value.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
}
/**
* This method is used to update the effect's uniforms once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( /* frame */ ) {
const map = this.textureNode.value;
this._invSize.value.set( 1 / map.image.width, 1 / map.image.height );
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup( /* builder */ ) {
const { textureNode } = this;
const uvNode = textureNode.uvNode || uv();
const sampleTexture = ( uv ) => textureNode.sample( uv );
const sobel = Fn( () => {
// Sobel Edge Detection (see https://youtu.be/uihBwtPIBxM)
const texel = this._invSize;
// kernel definition (in glsl matrices are filled in column-major order)
const Gx = mat3( - 1, - 2, - 1, 0, 0, 0, 1, 2, 1 ); // x direction kernel
const Gy = mat3( - 1, 0, 1, - 2, 0, 2, - 1, 0, 1 ); // y direction kernel
// fetch the 3x3 neighbourhood of a fragment
// first column
const tx0y0 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( - 1, - 1 ) ) ) ).xyz );
const tx0y1 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( - 1, 0 ) ) ) ).xyz );
const tx0y2 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( - 1, 1 ) ) ) ).xyz );
// second column
const tx1y0 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( 0, - 1 ) ) ) ).xyz );
const tx1y1 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( 0, 0 ) ) ) ).xyz );
const tx1y2 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( 0, 1 ) ) ) ).xyz );
// third column
const tx2y0 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( 1, - 1 ) ) ) ).xyz );
const tx2y1 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( 1, 0 ) ) ) ).xyz );
const tx2y2 = luminance( sampleTexture( uvNode.add( texel.mul( vec2( 1, 1 ) ) ) ).xyz );
// gradient value in x direction
const valueGx = add(
Gx[ 0 ][ 0 ].mul( tx0y0 ),
Gx[ 1 ][ 0 ].mul( tx1y0 ),
Gx[ 2 ][ 0 ].mul( tx2y0 ),
Gx[ 0 ][ 1 ].mul( tx0y1 ),
Gx[ 1 ][ 1 ].mul( tx1y1 ),
Gx[ 2 ][ 1 ].mul( tx2y1 ),
Gx[ 0 ][ 2 ].mul( tx0y2 ),
Gx[ 1 ][ 2 ].mul( tx1y2 ),
Gx[ 2 ][ 2 ].mul( tx2y2 )
);
// gradient value in y direction
const valueGy = add(
Gy[ 0 ][ 0 ].mul( tx0y0 ),
Gy[ 1 ][ 0 ].mul( tx1y0 ),
Gy[ 2 ][ 0 ].mul( tx2y0 ),
Gy[ 0 ][ 1 ].mul( tx0y1 ),
Gy[ 1 ][ 1 ].mul( tx1y1 ),
Gy[ 2 ][ 1 ].mul( tx2y1 ),
Gy[ 0 ][ 2 ].mul( tx0y2 ),
Gy[ 1 ][ 2 ].mul( tx1y2 ),
Gy[ 2 ][ 2 ].mul( tx2y2 )
);
// magnitude of the total gradient
const G = valueGx.mul( valueGx ).add( valueGy.mul( valueGy ) ).sqrt();
return vec4( vec3( G ), 1 );
} );
const outputNode = sobel();
return outputNode;
}
}
export default SobelOperatorNode;
/**
* TSL function for creating a sobel operator node which performs edge detection with a sobel filter.
*
* @tsl
* @function
* @param {Node<vec4>} node - The node that represents the input of the effect.
* @returns {SobelOperatorNode}
*/
export const sobel = ( node ) => nodeObject( new SobelOperatorNode( convertToTexture( node ) ) );
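// Example usage (illustrative sketch, not part of the upstream file): as noted
// above, the filter should run after tone mapping and color space conversion,
// hence renderOutput() and the disabled default output transform. `renderer`,
// `scene` and `camera` are assumed to exist.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, renderOutput } from 'three/tsl';
//
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputColorTransform = false;
//
// const scenePass = pass( scene, camera );
// postProcessing.outputNode = sobel( renderOutput( scenePass ) );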

View File

@@ -0,0 +1,185 @@
import { RenderTarget, StereoCamera, HalfFloatType, LinearFilter, NearestFilter, Vector2, PassNode, QuadMesh, RendererUtils } from 'three/webgpu';
import { texture } from 'three/tsl';
const _size = /*@__PURE__*/ new Vector2();
const _quadMesh = /*@__PURE__*/ new QuadMesh();
let _rendererState;
/**
* A special (abstract) render pass node that renders the scene
* as a stereoscopic image. Unlike {@link StereoPassNode}, this
* node merges the images for the left and right eyes
* into a single one. That is required for effects like
* anaglyph or parallax barrier.
*
* @abstract
* @augments PassNode
* @three_import import { StereoCompositePassNode } from 'three/addons/tsl/display/StereoCompositePassNode.js';
*/
class StereoCompositePassNode extends PassNode {
static get type() {
return 'StereoCompositePassNode';
}
/**
* Constructs a new stereo composite pass node.
*
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
*/
constructor( scene, camera ) {
super( PassNode.COLOR, scene, camera );
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isStereoCompositePassNode = true;
/**
* The internal stereo camera that is used to render the scene.
*
* @type {StereoCamera}
*/
this.stereo = new StereoCamera();
const _params = { minFilter: LinearFilter, magFilter: NearestFilter, type: HalfFloatType };
/**
* The render target for rendering the left eye's view.
*
* @type {RenderTarget}
*/
this._renderTargetL = new RenderTarget( 1, 1, _params );
/**
* The render target for rendering the right eye's view.
*
* @type {RenderTarget}
*/
this._renderTargetR = new RenderTarget( 1, 1, _params );
/**
* A texture node representing the left eye's view.
*
* @type {TextureNode}
*/
this._mapLeft = texture( this._renderTargetL.texture );
/**
* A texture node representing the right eye's view.
*
* @type {TextureNode}
*/
this._mapRight = texture( this._renderTargetR.texture );
/**
* The node material that implements the composite. All
* derived effect passes must provide an instance for rendering.
*
* @type {NodeMaterial}
*/
this._material = null;
}
/**
* Updates the internal stereo camera.
*
* @param {number} coordinateSystem - The current coordinate system.
*/
updateStereoCamera( coordinateSystem ) {
this.stereo.cameraL.coordinateSystem = coordinateSystem;
this.stereo.cameraR.coordinateSystem = coordinateSystem;
this.stereo.update( this.camera );
}
/**
* Sets the size of the pass.
*
* @param {number} width - The width of the pass.
* @param {number} height - The height of the pass.
*/
setSize( width, height ) {
super.setSize( width, height );
this._renderTargetL.setSize( this.renderTarget.width, this.renderTarget.height );
this._renderTargetR.setSize( this.renderTarget.width, this.renderTarget.height );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
const { scene, stereo, renderTarget } = this;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
this._pixelRatio = renderer.getPixelRatio();
this.updateStereoCamera( renderer.coordinateSystem );
const size = renderer.getSize( _size );
this.setSize( size.width, size.height );
// left
renderer.setRenderTarget( this._renderTargetL );
renderer.render( scene, stereo.cameraL );
// right
renderer.setRenderTarget( this._renderTargetR );
renderer.render( scene, stereo.cameraR );
// composite
renderer.setRenderTarget( renderTarget );
_quadMesh.material = this._material;
_quadMesh.render( renderer );
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* Frees internal resources. This method should be called
* when the pass is no longer required.
*/
dispose() {
super.dispose();
this._renderTargetL.dispose();
this._renderTargetR.dispose();
if ( this._material !== null ) {
this._material.dispose();
}
}
}
export default StereoCompositePassNode;
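// Minimal subclass sketch (illustrative only, not shipped with three.js): a concrete
// pass has to assign a NodeMaterial to `this._material` that composites `this._mapLeft`
// and `this._mapRight`. The naive channel split below is a placeholder, not the
// calibrated color math of the real AnaglyphPassNode.
//
// import { NodeMaterial } from 'three/webgpu';
// import { vec4 } from 'three/tsl';
//
// class NaiveAnaglyphPassNode extends StereoCompositePassNode {
//
// 	constructor( scene, camera ) {
//
// 		super( scene, camera );
//
// 		this._material = new NodeMaterial();
// 		this._material.name = 'NaiveAnaglyph';
//
// 		// red channel from the left eye, green/blue channels from the right eye
// 		this._material.colorNode = vec4( this._mapLeft.r, this._mapRight.g, this._mapRight.b, 1 );
//
// 	}
//
// }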

node_modules/three/examples/jsm/tsl/display/StereoPassNode.js

@@ -0,0 +1,120 @@
import { StereoCamera, Vector2, PassNode, RendererUtils } from 'three/webgpu';
import { nodeObject } from 'three/tsl';
const _size = /*@__PURE__*/ new Vector2();
let _rendererState;
/**
* A special render pass node that renders the scene as a stereoscopic image.
*
* @augments PassNode
* @three_import import { stereoPass } from 'three/addons/tsl/display/StereoPassNode.js';
*/
class StereoPassNode extends PassNode {
static get type() {
return 'StereoPassNode';
}
/**
* Constructs a new stereo pass node.
*
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
*/
constructor( scene, camera ) {
super( PassNode.COLOR, scene, camera );
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isStereoPassNode = true;
/**
* The internal stereo camera that is used to render the scene.
*
* @type {StereoCamera}
*/
this.stereo = new StereoCamera();
this.stereo.aspect = 0.5;
}
/**
* This method is used to render the stereo effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
const { scene, camera, stereo, renderTarget } = this;
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
this._pixelRatio = renderer.getPixelRatio();
stereo.cameraL.coordinateSystem = renderer.coordinateSystem;
stereo.cameraR.coordinateSystem = renderer.coordinateSystem;
stereo.update( camera );
const size = renderer.getSize( _size );
this.setSize( size.width, size.height );
renderer.autoClear = false;
this._cameraNear.value = camera.near;
this._cameraFar.value = camera.far;
for ( const name in this._previousTextures ) {
this.toggleTexture( name );
}
renderer.setRenderTarget( renderTarget );
renderer.setMRT( this._mrt );
renderer.clear();
renderTarget.scissorTest = true;
renderTarget.scissor.set( 0, 0, renderTarget.width / 2, renderTarget.height );
renderTarget.viewport.set( 0, 0, renderTarget.width / 2, renderTarget.height );
renderer.render( scene, stereo.cameraL );
renderTarget.scissor.set( renderTarget.width / 2, 0, renderTarget.width / 2, renderTarget.height );
renderTarget.viewport.set( renderTarget.width / 2, 0, renderTarget.width / 2, renderTarget.height );
renderer.render( scene, stereo.cameraR );
renderTarget.scissorTest = false;
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
}
export default StereoPassNode;
/**
* TSL function for creating a stereo pass node for stereoscopic rendering.
*
* @tsl
* @function
* @param {Scene} scene - The scene to render.
* @param {Camera} camera - The camera to render the scene with.
* @returns {StereoPassNode}
*/
export const stereoPass = ( scene, camera ) => nodeObject( new StereoPassNode( scene, camera ) );
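// Usage sketch (assumes an existing `renderer`, `scene` and `camera`): the pass
// renders the left and right eye side by side into one frame via scissor/viewport,
// so it can be used directly as the output of a post-processing chain.
//
// import { PostProcessing } from 'three/webgpu';
// import { stereoPass } from 'three/addons/tsl/display/StereoPassNode.js';
//
// const postProcessing = new PostProcessing( renderer );
// postProcessing.outputNode = stereoPass( scene, camera );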

node_modules/three/examples/jsm/tsl/display/TRAANode.js

@@ -0,0 +1,461 @@
import { HalfFloatType, Vector2, RenderTarget, RendererUtils, QuadMesh, NodeMaterial, TempNode, NodeUpdateType, Matrix4 } from 'three/webgpu';
import { add, float, If, Loop, int, Fn, min, max, clamp, nodeObject, texture, uniform, uv, vec2, vec4, luminance, convertToTexture, passTexture, velocity } from 'three/tsl';
const _quadMesh = /*@__PURE__*/ new QuadMesh();
const _size = /*@__PURE__*/ new Vector2();
let _rendererState;
/**
* A special node that applies TRAA (Temporal Reprojection Anti-Aliasing).
*
* References:
* - {@link https://alextardif.com/TAA.html}
* - {@link https://www.elopezr.com/temporal-aa-and-the-quest-for-the-holy-trail/}
*
* @augments TempNode
* @three_import import { traa } from 'three/addons/tsl/display/TRAANode.js';
*/
class TRAANode extends TempNode {
static get type() {
return 'TRAANode';
}
/**
* Constructs a new TRAA node.
*
* @param {TextureNode} beautyNode - The texture node that represents the input of the effect.
* @param {TextureNode} depthNode - A node that represents the scene's depth.
* @param {TextureNode} velocityNode - A node that represents the scene's velocity.
* @param {Camera} camera - The camera the scene is rendered with.
*/
constructor( beautyNode, depthNode, velocityNode, camera ) {
super( 'vec4' );
/**
* This flag can be used for type testing.
*
* @type {boolean}
* @readonly
* @default true
*/
this.isTRAANode = true;
/**
* The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
* its effect once per frame in `updateBefore()`.
*
* @type {string}
* @default 'frame'
*/
this.updateBeforeType = NodeUpdateType.FRAME;
/**
* The texture node that represents the input of the effect.
*
* @type {TextureNode}
*/
this.beautyNode = beautyNode;
/**
* A node that represents the scene's depth.
*
* @type {TextureNode}
*/
this.depthNode = depthNode;
/**
* A node that represents the scene's velocity.
*
* @type {TextureNode}
*/
this.velocityNode = velocityNode;
/**
* The camera the scene is rendered with.
*
* @type {Camera}
*/
this.camera = camera;
/**
* The jitter index selects the current camera offset value.
*
* @private
* @type {number}
* @default 0
*/
this._jitterIndex = 0;
/**
* A uniform node holding the inverse resolution value.
*
* @private
* @type {UniformNode<vec2>}
*/
this._invSize = uniform( new Vector2() );
/**
* The render target that represents the history of frame data.
*
* @private
* @type {?RenderTarget}
*/
this._historyRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._historyRenderTarget.texture.name = 'TRAANode.history';
/**
* The render target for the resolve.
*
* @private
* @type {?RenderTarget}
*/
this._resolveRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
this._resolveRenderTarget.texture.name = 'TRAANode.resolve';
/**
* Material used for the resolve step.
*
* @private
* @type {NodeMaterial}
*/
this._resolveMaterial = new NodeMaterial();
this._resolveMaterial.name = 'TRAA.resolve';
/**
* The result of the effect is represented as a separate texture node.
*
* @private
* @type {PassTextureNode}
*/
this._textureNode = passTexture( this, this._resolveRenderTarget.texture );
/**
* Used to save the original/unjittered projection matrix.
*
* @private
* @type {Matrix4}
*/
this._originalProjectionMatrix = new Matrix4();
/**
* Whether the post-processing stack needs to be synced with the TRAA node.
*
* @private
* @type {boolean}
*/
this._needsPostProcessingSync = false;
}
/**
* Returns the result of the effect as a texture node.
*
* @return {PassTextureNode} A texture node that represents the result of the effect.
*/
getTextureNode() {
return this._textureNode;
}
/**
* Sets the size of the effect.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setSize( width, height ) {
this._historyRenderTarget.setSize( width, height );
this._resolveRenderTarget.setSize( width, height );
this._invSize.value.set( 1 / width, 1 / height );
}
/**
* Defines the TRAA's current jitter as a view offset
* to the scene's camera.
*
* @param {number} width - The width of the effect.
* @param {number} height - The height of the effect.
*/
setViewOffset( width, height ) {
// save original/unjittered projection matrix for velocity pass
this.camera.updateProjectionMatrix();
this._originalProjectionMatrix.copy( this.camera.projectionMatrix );
velocity.setProjectionMatrix( this._originalProjectionMatrix );
//
const viewOffset = {
fullWidth: width,
fullHeight: height,
offsetX: 0,
offsetY: 0,
width: width,
height: height
};
const jitterOffset = _JitterVectors[ this._jitterIndex ];
this.camera.setViewOffset(
viewOffset.fullWidth, viewOffset.fullHeight,
viewOffset.offsetX + jitterOffset[ 0 ] * 0.0625, viewOffset.offsetY + jitterOffset[ 1 ] * 0.0625, // 0.0625 = 1 / 16
viewOffset.width, viewOffset.height
);
}
/**
* Clears the view offset from the scene's camera.
*/
clearViewOffset() {
this.camera.clearViewOffset();
velocity.setProjectionMatrix( null );
// update jitter index
this._jitterIndex ++;
this._jitterIndex = this._jitterIndex % ( _JitterVectors.length - 1 );
}
/**
* This method is used to render the effect once per frame.
*
* @param {NodeFrame} frame - The current node frame.
*/
updateBefore( frame ) {
const { renderer } = frame;
// keep the TRAA in sync with the dimensions of the beauty node
const beautyRenderTarget = ( this.beautyNode.isRTTNode ) ? this.beautyNode.renderTarget : this.beautyNode.passNode.renderTarget;
const width = beautyRenderTarget.texture.width;
const height = beautyRenderTarget.texture.height;
//
if ( this._needsPostProcessingSync === true ) {
this.setViewOffset( width, height );
this._needsPostProcessingSync = false;
}
_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );
//
const needsRestart = this._historyRenderTarget.width !== width || this._historyRenderTarget.height !== height;
this.setSize( width, height );
// every time the dimensions change we need fresh history data
if ( needsRestart === true ) {
// bind and clear render target to make sure they are initialized after the resize which triggers a dispose()
renderer.setRenderTarget( this._historyRenderTarget );
renderer.clear();
renderer.setRenderTarget( this._resolveRenderTarget );
renderer.clear();
// make sure to reset the history with the contents of the beauty buffer, otherwise subsequent frames after the
// resize will fade from a darker color to the correct one because the history was cleared with black.
renderer.copyTextureToTexture( beautyRenderTarget.texture, this._historyRenderTarget.texture );
}
// resolve
renderer.setRenderTarget( this._resolveRenderTarget );
_quadMesh.material = this._resolveMaterial;
_quadMesh.render( renderer );
renderer.setRenderTarget( null );
// update history
renderer.copyTextureToTexture( this._resolveRenderTarget.texture, this._historyRenderTarget.texture );
// restore
RendererUtils.restoreRendererState( renderer, _rendererState );
}
/**
* This method is used to setup the effect's render targets and TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {PassTextureNode}
*/
setup( builder ) {
const postProcessing = builder.context.postProcessing;
if ( postProcessing ) {
this._needsPostProcessingSync = true;
postProcessing.context.onBeforePostProcessing = () => {
const size = builder.renderer.getDrawingBufferSize( _size );
this.setViewOffset( size.width, size.height );
};
postProcessing.context.onAfterPostProcessing = () => {
this.clearViewOffset();
};
}
const historyTexture = texture( this._historyRenderTarget.texture );
const sampleTexture = this.beautyNode;
const depthTexture = this.depthNode;
const velocityTexture = this.velocityNode;
const resolve = Fn( () => {
const uvNode = uv();
const minColor = vec4( 10000 ).toVar();
const maxColor = vec4( - 10000 ).toVar();
const closestDepth = float( 1 ).toVar();
const closestDepthPixelPosition = vec2( 0 ).toVar();
// sample a 3x3 neighborhood to create a box in color space
// clamping the history color with the resulting min/max colors mitigates ghosting
Loop( { start: int( - 1 ), end: int( 1 ), type: 'int', condition: '<=', name: 'x' }, ( { x } ) => {
Loop( { start: int( - 1 ), end: int( 1 ), type: 'int', condition: '<=', name: 'y' }, ( { y } ) => {
const uvNeighbor = uvNode.add( vec2( float( x ), float( y ) ).mul( this._invSize ) ).toVar();
const colorNeighbor = max( vec4( 0 ), sampleTexture.sample( uvNeighbor ) ).toVar(); // use max() to avoid propagating garbage values
minColor.assign( min( minColor, colorNeighbor ) );
maxColor.assign( max( maxColor, colorNeighbor ) );
const currentDepth = depthTexture.sample( uvNeighbor ).r.toVar();
// find the sample position of the closest depth in the neighborhood (used for velocity)
If( currentDepth.lessThan( closestDepth ), () => {
closestDepth.assign( currentDepth );
closestDepthPixelPosition.assign( uvNeighbor );
} );
} );
} );
// sampling/reprojection
const offset = velocityTexture.sample( closestDepthPixelPosition ).xy.mul( vec2( 0.5, - 0.5 ) ); // NDC to uv offset
const currentColor = sampleTexture.sample( uvNode );
const historyColor = historyTexture.sample( uvNode.sub( offset ) );
// clamping
const clampedHistoryColor = clamp( historyColor, minColor, maxColor );
// flicker reduction based on luminance weighting
const currentWeight = float( 0.05 ).toVar();
const historyWeight = currentWeight.oneMinus().toVar();
const compressedCurrent = currentColor.mul( float( 1 ).div( ( max( currentColor.r, currentColor.g, currentColor.b ).add( 1.0 ) ) ) );
const compressedHistory = clampedHistoryColor.mul( float( 1 ).div( ( max( clampedHistoryColor.r, clampedHistoryColor.g, clampedHistoryColor.b ).add( 1.0 ) ) ) );
const luminanceCurrent = luminance( compressedCurrent.rgb );
const luminanceHistory = luminance( compressedHistory.rgb );
currentWeight.mulAssign( float( 1.0 ).div( luminanceCurrent.add( 1 ) ) );
historyWeight.mulAssign( float( 1.0 ).div( luminanceHistory.add( 1 ) ) );
return add( currentColor.mul( currentWeight ), clampedHistoryColor.mul( historyWeight ) ).div( max( currentWeight.add( historyWeight ), 0.00001 ) );
} );
// materials
this._resolveMaterial.colorNode = resolve();
return this._textureNode;
}
/**
* Frees internal resources. This method should be called
* when the effect is no longer required.
*/
dispose() {
this._historyRenderTarget.dispose();
this._resolveRenderTarget.dispose();
this._resolveMaterial.dispose();
}
}
export default TRAANode;
// These jitter vectors are specified in integers because it is easier.
// I am assuming a [-8,8) integer grid, but it needs to be mapped onto [-0.5,0.5)
// before being used, thus these integers need to be scaled by 1/16.
//
// Sample patterns reference: https://msdn.microsoft.com/en-us/library/windows/desktop/ff476218%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
const _JitterVectors = [
[ - 4, - 7 ], [ - 7, - 5 ], [ - 3, - 5 ], [ - 5, - 4 ],
[ - 1, - 4 ], [ - 2, - 2 ], [ - 6, - 1 ], [ - 4, 0 ],
[ - 7, 1 ], [ - 1, 2 ], [ - 6, 3 ], [ - 3, 3 ],
[ - 7, 6 ], [ - 3, 6 ], [ - 5, 7 ], [ - 1, 7 ],
[ 5, - 7 ], [ 1, - 6 ], [ 6, - 5 ], [ 4, - 4 ],
[ 2, - 3 ], [ 7, - 2 ], [ 1, - 1 ], [ 4, - 1 ],
[ 2, 1 ], [ 6, 2 ], [ 0, 4 ], [ 4, 4 ],
[ 2, 5 ], [ 7, 5 ], [ 5, 6 ], [ 3, 7 ]
];
/**
* TSL function for creating a TRAA node for Temporal Reprojection Anti-Aliasing.
*
* @tsl
* @function
* @param {TextureNode} beautyNode - The texture node that represents the input of the effect.
* @param {TextureNode} depthNode - A node that represents the scene's depth.
* @param {TextureNode} velocityNode - A node that represents the scene's velocity.
* @param {Camera} camera - The camera the scene is rendered with.
* @returns {TRAANode}
*/
export const traa = ( beautyNode, depthNode, velocityNode, camera ) => nodeObject( new TRAANode( convertToTexture( beautyNode ), depthNode, velocityNode, camera ) );
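// Usage sketch (assumes `renderer`, `scene` and `camera`): TRAA needs beauty, depth
// and velocity data from the same scene pass, so the pass is configured with an MRT
// layout that includes velocity. This mirrors the official TRAA example setup.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, mrt, output, velocity } from 'three/tsl';
// import { traa } from 'three/addons/tsl/display/TRAANode.js';
//
// const postProcessing = new PostProcessing( renderer );
//
// const scenePass = pass( scene, camera );
// scenePass.setMRT( mrt( { output: output, velocity: velocity } ) );
//
// const scenePassColor = scenePass.getTextureNode();
// const scenePassDepth = scenePass.getTextureNode( 'depth' );
// const scenePassVelocity = scenePass.getTextureNode( 'velocity' );
//
// postProcessing.outputNode = traa( scenePassColor, scenePassDepth, scenePassVelocity, camera );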

node_modules/three/examples/jsm/tsl/display/TransitionNode.js

@@ -0,0 +1,141 @@
import { TempNode } from 'three/webgpu';
import { nodeObject, Fn, float, uv, convertToTexture, vec4, If, int, clamp, sub, mix } from 'three/tsl';
/**
* Post processing node for creating a transition effect between scenes.
*
* @augments TempNode
* @three_import import { transition } from 'three/addons/tsl/display/TransitionNode.js';
*/
class TransitionNode extends TempNode {
static get type() {
return 'TransitionNode';
}
/**
* Constructs a new transition node.
*
* @param {TextureNode} textureNodeA - A texture node that represents the beauty pass of the first scene.
* @param {TextureNode} textureNodeB - A texture node that represents the beauty pass of the second scene.
* @param {TextureNode} mixTextureNode - A texture node that defines what the transition effect should look like.
* @param {Node<float>} mixRatioNode - The interpolation factor that controls the mix.
* @param {Node<float>} thresholdNode - Can be used to tweak the linear interpolation.
* @param {Node<float>} useTextureNode - Whether `mixTextureNode` should influence the transition or not.
*/
constructor( textureNodeA, textureNodeB, mixTextureNode, mixRatioNode, thresholdNode, useTextureNode ) {
super( 'vec4' );
/**
* A texture node that represents the beauty pass of the first scene.
*
* @type {TextureNode}
*/
this.textureNodeA = textureNodeA;
/**
* A texture node that represents the beauty pass of the second scene.
*
* @type {TextureNode}
*/
this.textureNodeB = textureNodeB;
/**
* A texture that defines what the transition effect should look like.
*
* @type {TextureNode}
*/
this.mixTextureNode = mixTextureNode;
/**
* The interpolation factor that controls the mix.
*
* @type {Node<float>}
*/
this.mixRatioNode = mixRatioNode;
/**
* Can be used to tweak the linear interpolation.
*
* @type {Node<float>}
*/
this.thresholdNode = thresholdNode;
/**
* Whether `mixTextureNode` should influence the transition or not.
*
* @type {Node<float>}
*/
this.useTextureNode = useTextureNode;
}
/**
* This method is used to setup the effect's TSL code.
*
* @param {NodeBuilder} builder - The current node builder.
* @return {ShaderCallNodeInternal}
*/
setup() {
const { textureNodeA, textureNodeB, mixTextureNode, mixRatioNode, thresholdNode, useTextureNode } = this;
const sampleTexture = ( textureNode ) => {
const uvNodeTexture = textureNode.uvNode || uv();
return textureNode.sample( uvNodeTexture );
};
const transition = Fn( () => {
const texelOne = sampleTexture( textureNodeA );
const texelTwo = sampleTexture( textureNodeB );
const color = vec4().toVar();
If( useTextureNode.equal( int( 1 ) ), () => {
const transitionTexel = sampleTexture( mixTextureNode );
const r = mixRatioNode.mul( thresholdNode.mul( 2.0 ).add( 1.0 ) ).sub( thresholdNode );
const mixf = clamp( sub( transitionTexel.r, r ).mul( float( 1.0 ).div( thresholdNode ) ), 0.0, 1.0 );
color.assign( mix( texelOne, texelTwo, mixf ) );
} ).Else( () => {
color.assign( mix( texelTwo, texelOne, mixRatioNode ) );
} );
return color;
} );
const outputNode = transition();
return outputNode;
}
}
export default TransitionNode;
/**
* TSL function for creating a transition node for post processing.
*
* @tsl
* @function
* @param {Node<vec4>} nodeA - A texture node that represents the beauty pass of the first scene.
* @param {Node<vec4>} nodeB - A texture node that represents the beauty pass of the second scene.
* @param {Node<vec4>} mixTextureNode - A texture that defines what the transition effect should look like.
* @param {Node<float> | number} mixRatio - The interpolation factor that controls the mix.
* @param {Node<float> | number} threshold - Can be used to tweak the linear interpolation.
* @param {Node<float> | number} useTexture - Whether `mixTextureNode` should influence the transition or not.
* @returns {TransitionNode}
*/
export const transition = ( nodeA, nodeB, mixTextureNode, mixRatio, threshold, useTexture ) => nodeObject( new TransitionNode( convertToTexture( nodeA ), convertToTexture( nodeB ), convertToTexture( mixTextureNode ), nodeObject( mixRatio ), nodeObject( threshold ), nodeObject( useTexture ) ) );
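// Usage sketch (assumes two scenes, a shared `camera`, a loaded `mixTexture` and a
// `renderer`): uniforms drive the blend so it can be animated per frame. Note that
// per the math above, a ratio of 0 shows the second input and 1 shows the first.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, texture, uniform } from 'three/tsl';
// import { transition } from 'three/addons/tsl/display/TransitionNode.js';
//
// const postProcessing = new PostProcessing( renderer );
//
// const scenePassA = pass( sceneA, camera );
// const scenePassB = pass( sceneB, camera );
//
// const mixRatio = uniform( 0 ); // animate between 0 and 1
// const threshold = uniform( 0.1 ); // softness of the textured wipe
// const useTexture = uniform( 1 ); // 1: use mixTexture, 0: plain crossfade
//
// postProcessing.outputNode = transition( scenePassA, scenePassB, texture( mixTexture ), mixRatio, threshold, useTexture );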

node_modules/three/examples/jsm/tsl/display/boxBlur.js

@@ -0,0 +1,64 @@
import { Fn, vec2, uv, Loop, vec4, premultiplyAlpha, unpremultiplyAlpha, max, int, textureSize, nodeObject, convertToTexture } from 'three/tsl';
/**
* Applies a box blur effect to the given texture node.
*
* Compared to Gaussian blur, box blur produces a more blocky result but with better performance when correctly
* configured. It is intended for mobile devices or performance restricted use cases where Gaussian is too heavy.
*
* The (kernel) `size` parameter should be small (1, 2 or 3) since it determines the number of samples based on (size * 2 + 1)^2.
* This implementation uses a single-pass approach, so the kernel is not applied as a separable filter. That means larger
* kernels won't perform well. Use Gaussian blur instead if you need a higher-quality result.
*
* To produce wider blurs, increase the `separation` parameter instead, which has no influence on the performance.
*
* Reference: {@link https://github.com/lettier/3d-game-shaders-for-beginners/blob/master/demonstration/shaders/fragment/box-blur.frag}.
*
* @function
* @param {Node<vec4>} textureNode - The texture node that should be blurred.
* @param {Object} [options={}] - Additional options for the box blur effect.
* @param {Node<int>} [options.size=int(1)] - Controls the blur's kernel size. For performant results, the value should be within [1, 3].
* @param {Node<int>} [options.separation=int(1)] - Spreads out the blur without having to sample additional fragments. Should be in the range [1, Infinity).
* @param {boolean} [options.premultipliedAlpha=false] - Whether to use premultiplied alpha for the blur effect.
* @return {Node<vec4>} The blurred texture node.
*/
export const boxBlur = /*#__PURE__*/ Fn( ( [ textureNode, options = {} ] ) => {
textureNode = convertToTexture( textureNode );
const size = nodeObject( options.size ) || int( 1 );
const separation = nodeObject( options.separation ) || int( 1 );
const premultipliedAlpha = options.premultipliedAlpha || false;
const tap = ( uv ) => {
const sample = textureNode.sample( uv );
return premultipliedAlpha ? premultiplyAlpha( sample ) : sample;
};
const targetUV = textureNode.uvNode || uv();
const result = vec4( 0 ).toVar();
const sep = max( separation, 1 );
const count = int( 0 ).toVar();
const pixelStep = vec2( 1 ).div( textureSize( textureNode ) );
Loop( { start: size.negate(), end: size, name: 'i', condition: '<=' }, ( { i } ) => {
Loop( { start: size.negate(), end: size, name: 'j', condition: '<=' }, ( { j } ) => {
const uvs = targetUV.add( vec2( i, j ).mul( pixelStep ).mul( sep ) );
result.addAssign( tap( uvs ) );
count.addAssign( 1 );
} );
} );
result.divAssign( count );
return premultipliedAlpha ? unpremultiplyAlpha( result ) : result;
} );
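// Usage sketch (assumes `renderer`, `scene` and `camera`): a small kernel combined
// with a larger `separation` spreads the blur without increasing the sample count.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, int } from 'three/tsl';
// import { boxBlur } from 'three/addons/tsl/display/boxBlur.js';
//
// const postProcessing = new PostProcessing( renderer );
//
// const scenePassColor = pass( scene, camera ).getTextureNode();
// postProcessing.outputNode = boxBlur( scenePassColor, { size: int( 2 ), separation: int( 3 ) } );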

node_modules/three/examples/jsm/tsl/display/hashBlur.js

@@ -0,0 +1,53 @@
import { float, Fn, vec2, uv, sin, rand, degrees, cos, Loop, vec4, premultiplyAlpha, unpremultiplyAlpha, convertToTexture, nodeObject } from 'three/tsl';
/**
* Applies a hash blur effect to the given texture node.
*
* The approach of this blur is different compared to Gaussian and box blur since
* it does not rely on a kernel to apply a convolution. Instead, it reads the base
* texture multiple times in a random pattern and then averages the samples. A
* typical artifact of this technique is a slightly noisy appearance of the blur which
* can be mitigated by increasing the number of iterations (see `repeats` parameter).
* Compared to Gaussian blur, hash blur requires just a single pass.
*
* Reference: {@link https://www.shadertoy.com/view/4lXXWn}.
*
* @function
* @param {Node<vec4>} textureNode - The texture node that should be blurred.
* @param {Node<float>} [bluramount=float(0.1)] - This node determines the amount of blur.
* @param {Object} [options={}] - Additional options for the hash blur effect.
* @param {Node<float>} [options.repeats=float(45)] - The number of iterations for the blur effect.
* @param {boolean} [options.premultipliedAlpha=false] - Whether to use premultiplied alpha for the blur effect.
* @return {Node<vec4>} The blurred texture node.
*/
export const hashBlur = /*#__PURE__*/ Fn( ( [ textureNode, bluramount = float( 0.1 ), options = {} ] ) => {
textureNode = convertToTexture( textureNode );
const repeats = nodeObject( options.repeats ) || float( 45 );
const premultipliedAlpha = options.premultipliedAlpha || false;
const tap = ( uv ) => {
const sample = textureNode.sample( uv );
return premultipliedAlpha ? premultiplyAlpha( sample ) : sample;
};
const targetUV = textureNode.uvNode || uv();
const blurred_image = vec4( 0. ).toVar();
Loop( { start: 0., end: repeats, type: 'float' }, ( { i } ) => {
const q = vec2( vec2( cos( degrees( i.div( repeats ).mul( 360. ) ) ), sin( degrees( i.div( repeats ).mul( 360. ) ) ) ).mul( rand( vec2( i, targetUV.x.add( targetUV.y ) ) ).add( bluramount ) ) );
const uv2 = vec2( targetUV.add( q.mul( bluramount ) ) );
blurred_image.addAssign( tap( uv2 ) );
} );
blurred_image.divAssign( repeats );
return premultipliedAlpha ? unpremultiplyAlpha( blurred_image ) : blurred_image;
} );
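// Usage sketch (assumes `renderer`, `scene` and `camera`): increasing `repeats`
// trades performance for a less noisy result, as noted in the JSDoc above.
//
// import { PostProcessing } from 'three/webgpu';
// import { pass, float } from 'three/tsl';
// import { hashBlur } from 'three/addons/tsl/display/hashBlur.js';
//
// const postProcessing = new PostProcessing( renderer );
//
// const scenePassColor = pass( scene, camera ).getTextureNode();
// postProcessing.outputNode = hashBlur( scenePassColor, float( 0.08 ), { repeats: float( 60 ) } );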