簡體   English   中英

SSAO的depthTexture錯誤

[英]Incorrect depthTexture with SSAO

最近,當我嘗試獲取THREE.DepthTexture與Ambient Occlusion着色器一起使用時,我一直感到困惑。 在進行RGBA解壓縮之前,我已經使用過它,但是在閱讀了有關Matt Deslauriers的Audiograph項目之后,我決定嘗試使用他描述的方法來提高性能:

從歷史上講,在ThreeJS中,您將使用MeshDepthMaterial將場景渲染到WebGLRenderTarget,然后在從深度目標采樣時解壓縮為線性深度值。 由於許多環境都支持WEBGL_depth_texture擴展,因此這是相當昂貴的並且通常是不必要的。

在嘗試了這種方法之后,我最終以某種奇怪的不需要的效果結束了,其中的線條遍布整個地形:

地形上的線

我在下面設置了一個小示例,在其中復制了問題。 我覺得很簡單,我只是在掩飾一下。

我希望這里的人能夠指出我所缺少的東西,這樣我就可以以一種性能更高的方式使環境光遮擋工作!

提前謝謝了。

 const scene = new THREE.Scene(); const camera = new THREE.PerspectiveCamera(75, window.innerWidth/window.innerHeight, 0.1, 2000); const pivot = new THREE.Object3D(); pivot.add(camera); scene.add(pivot); camera.position.set(0, 250, 500); camera.lookAt(pivot.position); const renderer = new THREE.WebGLRenderer(); renderer.setSize(window.innerWidth, window.innerHeight); renderer.gammaInput = true; renderer.gammaOutput = true; renderer.gammaFactor = 2.2; let supportsExtension = false; if (renderer.extensions.get('WEBGL_depth_texture')) { supportsExtension = true; } document.body.appendChild(renderer.domElement); const createCube = () => { const geo = new THREE.BoxGeometry(500, 500, 500); const mat = new THREE.MeshBasicMaterial({ color: 0x00ff00 }); const obj = new THREE.Mesh(geo, mat); obj.position.y = -(obj.geometry.parameters.height / 2); scene.add(obj); } const createSphere = () => { const geo = new THREE.SphereGeometry(100, 12, 8); const mat = new THREE.MeshBasicMaterial({ color: 0xff00ff }); const obj = new THREE.Mesh(geo, mat); obj.position.y = obj.geometry.parameters.radius; scene.add(obj); } // Create objects createCube(); createSphere(); const composer = new THREE.EffectComposer(renderer); const target = new THREE.WebGLRenderTarget( window.innerWidth, window.innerHeight ); target.texture.format = THREE.RGBFormat; target.texture.minFilter = THREE.NearestFilter; target.texture.magFilter = THREE.NearestFilter; target.texture.generateMipmaps = false; target.stencilBuffer = false; target.depthBuffer = true; target.depthTexture = new THREE.DepthTexture(); target.depthTexture.type = THREE.UnsignedShortType; function initPostProcessing() { composer.addPass(new THREE.RenderPass( scene, camera )); const pass = new THREE.ShaderPass({ uniforms: { "tDiffuse": { value: null }, "tDepth": { value: target.depthTexture }, "resolution": { value: new THREE.Vector2( 512, 512 ) }, "cameraNear": { value: 1 }, "cameraFar": { value: 100 }, "onlyAO": { value: 0 }, "aoClamp": { value: 0.5 }, "lumInfluence": { value: 0.5 } }, vertexShader: document.getElementById('vertexShader').textContent, fragmentShader: document.getElementById('fragmentShader').textContent, }); pass.material.precision = 'highp'; composer.addPass(pass); pass.uniforms.tDepth.value = target.depthTexture; pass.uniforms.cameraNear.value = camera.near; pass.uniforms.cameraFar.value = camera.far; composer.passes[composer.passes.length - 1].renderToScreen = true; } initPostProcessing(); const animate = () => { requestAnimationFrame( animate ); pivot.rotation.y += 0.01; renderer.render( scene, camera, target ); composer.render(); } animate(); 
 html, body { margin: 0; } canvas { display: block; width: 100%; height: 100%; } 
 <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/86/three.js"></script> <script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/EffectComposer.js"></script> <script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/RenderPass.js"></script> <script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/ShaderPass.js"></script> <script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/shaders/CopyShader.js"></script> <script id="vertexShader" type="x-shader/x-vertex"> varying vec2 vUv; void main() { vUv = uv; gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 ); } </script> <script id="fragmentShader" type="x-shader/x-fragment"> uniform float cameraNear; uniform float cameraFar; uniform bool onlyAO; // use only ambient occlusion pass? uniform vec2 resolution; // texture width, height uniform float aoClamp; // depth clamp - reduces haloing at screen edges uniform float lumInfluence; // how much luminance affects occlusion uniform sampler2D tDiffuse; uniform highp sampler2D tDepth; varying vec2 vUv; // #define PI 3.14159265 #define DL 2.399963229728653 // PI * ( 3.0 - sqrt( 5.0 ) ) #define EULER 2.718281828459045 // user variables const int samples = 4; // ao sample count const float radius = 5.0; // ao radius const bool useNoise = false; // use noise instead of pattern for sample dithering const float noiseAmount = 0.0003; // dithering amount const float diffArea = 0.4; // self-shadowing reduction const float gDisplace = 0.4; // gauss bell center highp vec2 rand( const vec2 coord ) { highp vec2 noise; if ( useNoise ) { float nx = dot ( coord, vec2( 12.9898, 78.233 ) ); float ny = dot ( coord, vec2( 12.9898, 78.233 ) * 2.0 ); noise = clamp( fract ( 43758.5453 * sin( vec2( nx, ny ) ) ), 0.0, 1.0 ); } else { highp float ff = fract( 1.0 - coord.s * ( resolution.x / 2.0 ) ); highp float gg = fract( coord.t * ( resolution.y / 2.0 ) ); noise = vec2( 0.25, 0.75 ) * vec2( ff ) + vec2( 0.75, 0.25 ) * gg; } return ( noise * 2.0 - 1.0 ) * noiseAmount; } float readDepth( const in vec2 coord ) { float cameraFarPlusNear = cameraFar + cameraNear; float cameraFarMinusNear = cameraFar - cameraNear; float cameraCoef = 2.0 * cameraNear; return cameraCoef / ( cameraFarPlusNear - texture2D( tDepth, coord ).x * cameraFarMinusNear ); } float compareDepths( const in float depth1, const in float depth2, inout int far ) { float garea = 2.0; // gauss bell width float diff = ( depth1 - depth2 ) * 100.0; // depth difference (0-100) // reduce left bell width to avoid self-shadowing if ( diff < gDisplace ) { garea = diffArea; } else { far = 1; } float dd = diff - gDisplace; float gauss = pow( EULER, -2.0 * dd * dd / ( garea * garea ) ); return gauss; } float calcAO( float depth, float dw, float dh ) { float dd = radius - depth * radius; vec2 vv = vec2( dw, dh ); vec2 coord1 = vUv + dd * vv; vec2 coord2 = vUv - dd * vv; float temp1 = 0.0; float temp2 = 0.0; int far = 0; temp1 = compareDepths( depth, readDepth( coord1 ), far ); // DEPTH EXTRAPOLATION if ( far > 0 ) { temp2 = compareDepths( readDepth( coord2 ), depth, far ); temp1 += ( 1.0 - temp1 ) * temp2; } return temp1; } void main() { highp vec2 noise = rand( vUv ); float depth = readDepth( vUv ); float tt = clamp( depth, aoClamp, 1.0 ); float w = ( 1.0 / resolution.x ) / tt + ( noise.x * ( 1.0 - noise.x ) ); float h = ( 1.0 / resolution.y ) / tt + ( noise.y * ( 1.0 - noise.y ) ); float ao = 0.0; float dz = 1.0 / float( samples ); float z = 1.0 - dz / 2.0; float l = 0.0; for ( int i = 0; i <= samples; i ++ ) { float r = sqrt( 1.0 - z ); float pw = cos( l ) * r; float ph = sin( l ) * r; ao += calcAO( depth, pw * w, ph * h ); z = z - dz; l = l + DL; } ao /= float( samples ); ao = 1.0 - ao; vec3 color = texture2D( tDiffuse, vUv ).rgb; vec3 lumcoeff = vec3( 0.299, 0.587, 0.114 ); float lum = dot( color.rgb, lumcoeff ); vec3 luminance = vec3( lum ); vec3 final = vec3( color * mix( vec3( ao ), vec3( 1.0 ), luminance * lumInfluence ) ); // mix( color * ao, white, luminance ) float depth2 = readDepth(vUv); if ( onlyAO ) { final = vec3( mix( vec3( ao ), vec3( 1.0 ), luminance * lumInfluence ) ); // ambient occlusion only } // gl_FragColor = vec4( vec3( readDepth( vUv) ), 1.0 ); // Depth gl_FragColor = vec4( final, 1.0 ); } </script> 

我很想聽聽是什么原因導致環境光遮擋無法正常渲染!

如果您使用透視相機並且出於任何目的都依賴深度圖(包括SSAO和陰影),請謹慎選擇camera.nearcamera.far ,尤其是near (如果要處理陰影, shadow.camera.near 。)

根據您的使用情況,將近平面盡可能地推出。 如果您的場景位於視錐的前部,則將獲得最佳效果。

three.js r.86

暫無
暫無

聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.

 
粵ICP備18138465號  © 2020-2024 STACKOOM.COM