我对环境遮挡有问题。当我无法正确获得正确的结果时,我试图同时遵循约翰·查普曼对Crytek AO的改进和学习of的教程。http://i.imgur.com/FbNsq9X.png?1
当相机转动或移动时,屏幕会持续闪烁,尽管它看起来是任意的。有人能告诉我我做错了什么吗?
#version 140
in vec2 UV;
in vec4 vRay;
out vec3 SSAOout;
const int kernelSize = 64;
uniform mat4 projection;
uniform sampler2D gbuffer0;
uniform sampler2D gbuffer1;
uniform sampler2D gbuffer2;
uniform sampler2D gbuffer3;
uniform sampler2D noiseTex;
uniform vec3 kernels[kernelSize];
vec4 ViewPosFromDepth(float depth, vec2 TexCoord) {
float z = depth * 2.0 - 1.0;
vec4 clipSpacePosition = vec4(TexCoord * 2.0 - 1.0, z, 1.0);
vec4 viewSpacePosition = invProjMat * clipSpacePosition;
viewSpacePosition /= viewSpacePosition.w;
return viewSpacePosition;
}
const vec2 noiseScale = vec2(1366.0/4.0, 768.0/4.0);
const float near = 0.1;
const float far = 100.0;
float LinearizeDepth(float depth)
{
float z = depth * 2.0 - 1.0; // back to NDC
return (2.0 * near * far) / (far + near - z * (far - near));
}
void main() {
vec4 gbuffer0Val = texture(gbuffer0, UV);
vec4 gbuffer1Val = texture(gbuffer1, UV);
vec4 gbuffer2Val = texture(gbuffer2, UV);
vec4 gbuffer3Val = texture(gbuffer3, UV);
vec3 fragPos = ViewPosFromDepth(gbuffer0Val.r, UV).rgb;
float dist = LinearizeDepth(gbuffer0Val.r);
vec3 normal = gbuffer1Val.rgb;
normal = decodeNormal(normal);
vec3 randomVec = vec3(2 * texture(noiseTex, UV * noiseScale).rg - 1, 0);
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
float bias = 0.025;
float radius = 3.0;
float occlusion = 0.0;
for(int i = 0; i < kernelSize; i++) {
vec3 sampleKernel = TBN * kernels[i];
sampleKernel = fragPos + sampleKernel * radius;
vec4 offset = vec4(sampleKernel, 1.0);
offset = projection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
float sampleDepth = texture(gbuffer0, 1-offset.xy).r;
sampleDepth = LinearizeDepth(sampleDepth);
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
occlusion += (sampleDepth >= sampleKernel.z + bias ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
SSAOout = vec3(occlusion);
}发布于 2017-06-02 13:56:28
你需要小心的空间-位置和正常需要在视图空间。
为了将深度转换为视点空间位置,我使用了这方法。所以:
vec3 viewPositionFromDepth(vec2 vTexCoord)
{
float z = texture2D(depthTexture, vTexCoord).r;
// Get x/w and y/w from the viewport position
float x = vTexCoord.x * 2.0 - 1.0;
float y = vTexCoord.y * 2.0 - 1.0;
vec4 vProjectedPos = vec4(x, y, z, 1.0f);
// Transform by the inverse projection matrix
vec4 vPositionVS = invProjection*vProjectedPos;
// Divide by w to get the view-space position
return vPositionVS.xyz / vPositionVS.w;
}要将普通视图转换为视图空间,需要将其乘以视图矩阵的逆(应该在CPU上完成):
vec4 n = transpose(inverse(ViewMatrix)) * normalize(texture2D(normalTexture, texCoord)*2.0 - 1.0 );在循环中,当采样深度与偏移量相比较时,这个深度应该与位置相同,所以我也是这样做的:
float sampleDepth = viewPositionFromDepth(offset.xy).z;这并不是最理想的方法,但它对我有效。
https://computergraphics.stackexchange.com/questions/5194
复制相似问题