我正在创建一个DXR PathTracer,它深受MJP (Matt Pettineo)的DXRPathTracer - https://github.com/TheRealMJP/DXRPathTracer - 的影响,相关的HLSL代码如下:
[shader("raygeneration")]
void RayGen()
{
    // Stop tracing once the requested accumulation budget has been reached
    // (maxFrames == 0 means "accumulate forever").
    if (g_giCB.maxFrames > 0 && g_giCB.accFrames >= g_giCB.maxFrames)
    {
        return;
    }

    uint2 LaunchIndex = DispatchRaysIndex().xy;
    uint2 LaunchDimensions = DispatchRaysDimensions().xy;

    // Per-pixel sub-pixel offset and RNG seeds. Initialize them up front so
    // they are well-defined for every sampling type — previously they were
    // only assigned on the SAMPLE_MJ path and were undefined otherwise.
    const uint pixelIdx = LaunchIndex.y * LaunchDimensions.x + LaunchIndex.x;
    float2 offset = float2(0.5f, 0.5f); // pixel-center fallback
    uint seed = pixelIdx;
    uint seed2 = 0;
    if (g_giCB.samplingType == SAMPLE_MJ)
    {
        uint sampleSetIdx = 0;
        offset = SamplePoint(pixelIdx, sampleSetIdx);
        seed = pixelIdx;
        seed2 = sampleSetIdx;
    }

    // Build the primary camera ray through this pixel.
    float3 primaryRayOrigin = g_sceneCB.cameraPosition.xyz;
    float3 primaryRayDirection;
    GenerateCameraRay(LaunchIndex, LaunchDimensions, g_sceneCB.projectionToWorld, primaryRayOrigin, primaryRayDirection, offset);

    // Fresh path payload for this pixel's path.
    PayloadIndirect indirectPayload;
    indirectPayload.color = float3(0, 0, 0);
    indirectPayload.roughness = 0;
    indirectPayload.rndSeed = seed;
    indirectPayload.rndSeed2 = seed2;
    indirectPayload.pathLength = 0;
    indirectPayload.diffusePath = false;

    // Trace the path and get this frame's radiance estimate for the pixel.
    float4 finalColor = float4(shootIndirectRay(primaryRayOrigin, primaryRayDirection, 1e-3f, indirectPayload), 1.0f);

    // Kill NaNs so a single bad sample cannot poison the running average.
    if (any(isnan(finalColor)))
    {
        finalColor = float4(0, 0, 0, 1);
    }

    // Clamp below the FP16 max so the accumulation target never stores inf.
    const float FP16Max = 65000.0f;
    finalColor = clamp(finalColor, 0.0f, FP16Max);

    // Progressive accumulation: incremental running average over accFrames+1
    // frames, merged with the previously accumulated result.
    if (g_giCB.accFrames > 0)
    {
        float4 prevScene = RTOutput[LaunchIndex];
        finalColor = ((float) g_giCB.accFrames * prevScene + finalColor) / ((float) g_giCB.accFrames + 1.0f);
    }
    RTOutput[LaunchIndex] = finalColor;
}
[shader("miss")]
void Miss(inout RayPayload payload : SV_RayPayload)
{
    // Visibility (shadow) ray escaped the scene without hitting anything,
    // so the light source is fully unoccluded from the origin point.
    payload.vis = 1.0f;
}
[shader("closesthit")]
// Closest-hit for visibility (shadow) rays: intentionally empty. Occlusion is
// signalled by the miss shader NOT running, so the payload's visibility value
// keeps whatever it was initialized to before TraceRay.
// NOTE(review): assumes the caller (presumably shadowRayVisibility) zeroes the
// visibility field before tracing — confirm at the call site.
void ClosestHit(inout PayloadIndirect payload, in BuiltInTriangleIntersectionAttributes attribs)
{
}
[shader("miss")]
void MissIndirect(inout PayloadIndirect payload : SV_RayPayload)
{
    // Environment contribution for path rays that left the scene.
    // (Skybox can be disabled via the CB for debugging, in which case a
    // missed ray contributes nothing.)
    if (g_giCB.useSkybox)
    {
        // Flip Z to match the cubemap's coordinate handedness before sampling.
        float3 sampleDir = WorldRayDirection();
        sampleDir.z = -sampleDir.z;
        payload.color += skyboxTexture.SampleLevel(g_sampler, sampleDir, 0).rgb;
    }
}
// Evaluates the direct-lighting BRDF (Lambert diffuse + GGX specular with
// multi-scattering energy compensation) for a single light direction.
//   normal               - shading normal (world space)
//   lightDir             - unit direction toward the light (world space)
//   peakIrradiance       - light irradiance at the surface
//   diffuseAlbedo        - diffuse reflectance
//   specularAlbedo       - specular reflectance (F0)
//   roughness            - GGX roughness
//   positionWS           - shaded point (world space)
//   cameraPosWS          - view origin used to build the view vector
//   msEnergyCompensation - multiple-scattering compensation factor
// Returns outgoing radiance contribution (before shadowing).
float3 CalcLighting(in float3 normal, in float3 lightDir, in float3 peakIrradiance,
    in float3 diffuseAlbedo, in float3 specularAlbedo, in float roughness,
    in float3 positionWS, in float3 cameraPosWS, in float3 msEnergyCompensation)
{
    // Lambertian diffuse term.
    float3 lighting = diffuseAlbedo * INV_PI;

    float3 view = normalize(cameraPosWS - positionWS);
    const float NoL = saturate(dot(normal, lightDir));

    // Add the GGX specular lobe only when the light is above the horizon.
    // (The previously computed-but-unused NoV has been removed.)
    if (NoL > 0.0f)
    {
        float3 h = normalize(view + lightDir);
        float3 fresnel = Fresnel(specularAlbedo, h, lightDir);
        float specular = GGXSpecular(roughness, normal, h, view, lightDir);
        lighting += specular * fresnel * msEnergyCompensation;
    }

    // Scale the combined BRDF by the incident cosine and the light intensity.
    return lighting * NoL * peakIrradiance;
}
// Shades one path vertex: direct sun lighting plus one stochastically sampled
// indirect bounce (diffuse cosine-hemisphere or GGX VNDF specular).
// Accumulates into (and returns) payload.color.
float3 CalculateRadiance(inout PayloadIndirect payload, in BuiltInTriangleIntersectionAttributes attribs)
{
    /* Prepare input data */
    float3 hitPos = WorldRayOrigin() + WorldRayDirection() * RayTCurrent();
    float3 triangleNormal, triangleTangent, triangleBitangent;
    loadHitData(triangleNormal, triangleTangent, triangleBitangent, attribs);

    // NOTE(review): material data is fetched from screen-space G-buffer
    // textures addressed by DispatchRaysIndex() (the pixel being rendered),
    // NOT by the surface this ray actually hit. That is only correct for the
    // primary hit (pathLength == 0); every secondary bounce reuses the first
    // hit's albedo/roughness/metallic, which skews multi-bounce lighting and
    // is a likely cause of shadows washing out at longer path lengths —
    // confirm and sample the hit surface's material instead.
    float4 albedo = albedoTexture.Load(int3(DispatchRaysIndex().xy, 0));
    float4 specularData = specularTexture.Load(int3(DispatchRaysIndex().xy, 0)); // one load instead of two
    // Never let a path get glossier than an earlier vertex on it.
    float roughness = max(specularData.g, payload.roughness);
    float metallic = specularData.b;
    // (An unused NormalTextureInput.Load was removed here.)

    float3x3 tangentToWorld = float3x3(triangleTangent, triangleBitangent, triangleNormal);
    float3 normalWS = triangleNormal;

    // Lobe enables: fully-metallic surfaces get no diffuse; a path flagged as
    // diffuse is not allowed to pick up new specular energy.
    bool enableDiffuse = metallic < 1.0f;
    bool enableSpecular = !payload.diffusePath;
    if (enableDiffuse == false && enableSpecular == false)
        return float3(0, 0, 0);
    const float3 diffuseAlbedo = lerp(albedo.rgb, 0.0f, metallic) * (enableDiffuse ? 1.0f : 0.0f);
    const float3 specularAlbedo = lerp(0.03f, albedo.rgb, metallic) * (enableSpecular ? 1.0f : 0.0f);

    // Multiple-scattering energy compensation from the precomputed DFG lookup
    // (1 + F0 * (1/Ess - 1)).
    float2 DFG = dfgTexture.SampleLevel(g_sampler, float2(saturate(dot(triangleNormal, -WorldRayDirection())), roughness), 0.0f).xy;
    float Ess = DFG.x;
    float3 msEnergyCompensation = float3(1, 1, 1) + specularAlbedo * (1.0f / Ess - 1.0f);

    float3 worldOrigin = WorldRayOrigin();

    /*********************************************/
    // Directional (sun) light with a shadow ray for visibility.
    // (Unused locals V, N, NoL were removed — CalcLighting computes NoL.)
    {
        float3 L = normalize(g_giCB.sunDirection);
        float distToLight = 1e+38f;
        float visibility = shadowRayVisibility(hitPos, L, 1e-3f, distToLight);
        payload.color += CalcLighting(triangleNormal, L, 1.0f, diffuseAlbedo, specularAlbedo,
            roughness, hitPos, worldOrigin, msEnergyCompensation) * visibility;
    }

    // Stochastic lobe selection: 50/50 between diffuse and specular when both
    // are enabled, forced to the single active lobe otherwise.
    float2 brdfSample = SamplePoint(payload.rndSeed, payload.rndSeed2);
    float selector = brdfSample.x;
    if (enableSpecular == false)
        selector = 0.0f;
    else if (enableDiffuse == false)
        selector = 1.0f;

    if (g_giCB.useIndirect == 1)
    {
        float3 throughput = float3(0, 0, 0);
        // Find next direction
        float3 rayDirWS = float3(0, 0, 0);
        if (g_giCB.samplingType == SAMPLE_MJ)
        {
            if (selector < 0.5f)
            {
                // Diffuse bounce: cosine-weighted hemisphere sample. The
                // cosine pdf cancels the NoL/PI factors, leaving the albedo.
                if (enableSpecular)
                    brdfSample.x *= 2.0f; // remap [0, 0.5) back to [0, 1)
                float3 rayDirTS = SampleDirectionCosineHemisphere(brdfSample.x, brdfSample.y);
                rayDirWS = normalize(mul(rayDirTS, tangentToWorld));
                throughput = diffuseAlbedo;
            }
            else
            {
                // Specular bounce: sample the GGX distribution of visible
                // normals; the VNDF estimator weight is F * G2 / G1 (Heitz).
                if (enableDiffuse)
                    brdfSample.x = (brdfSample.x - 0.5f) * 2.0f; // remap [0.5, 1) back to [0, 1)
                float3 wo = normalize(mul(WorldRayDirection(), transpose(tangentToWorld)));
                float3 wm = SampleGGXVisibleNormal(-wo, roughness, roughness, brdfSample.x, brdfSample.y);
                float3 wi = reflect(wo, wm);
                float3 normalTS = float3(0.0f, 0.0f, 1.0f);
                float3 F = Fresnel(specularAlbedo.rgb, wm, wi);
                float G1 = SmithGGXMasking(normalTS, wi, -wo, roughness * roughness);
                float G2 = SmithGGXMaskingShadowing(normalTS, wi, -wo, roughness * roughness);
                throughput = (F * (G2 / G1));
                rayDirWS = normalize(mul(wi, tangentToWorld));
#if 1
                // Apply multi-scattering energy compensation to the bounce too.
                DFG = dfgTexture.SampleLevel(g_sampler, float2(saturate(dot(triangleNormal, -WorldRayDirection())), roughness), 0.0f).xy;
                Ess = DFG.x;
                throughput *= float3(1, 1, 1) + specularAlbedo * (1.0f / Ess - 1.0f);
#endif
            }
        }
        // Compensate the 50% probability of picking each lobe (1 / pdf of the
        // lobe choice) when both lobes were candidates.
        if (enableDiffuse && enableSpecular)
            throughput *= 2.0f;

        if (payload.pathLength < g_giCB.bounceCount)
        {
            // Child payload for the next path vertex.
            PayloadIndirect newPayload;
            newPayload.pathLength = payload.pathLength + 1;
            newPayload.rndSeed = payload.rndSeed;
            newPayload.rndSeed2 = payload.rndSeed2;
            newPayload.color = float3(0, 0, 0);
            newPayload.diffusePath = (selector < 0.5f);
            newPayload.roughness = roughness;

            // Calculate next ray bounce color contribution.
            float3 bounceColor = shootIndirectRay(hitPos, rayDirWS, 1e-3f, newPayload);
            // Reject sampled directions that went below the geometric surface
            // (possible with normal-mapped shading normals).
            if (dot(normalWS, rayDirWS) <= 0.0f)
                bounceColor = float3(0, 0, 0);
            payload.color += bounceColor * throughput;
        }
    }
    return payload.color;
}
[shader("closesthit")]
// Closest-hit for indirect (path) rays: shades this path vertex and stores the
// accumulated radiance back into the payload.
void ClosestHitIndirect(inout PayloadIndirect payload, in BuiltInTriangleIntersectionAttributes attribs)
{
// CalculateRadiance both mutates payload.color (inout) and returns it; the
// assignment is therefore redundant but harmless.
payload.color = CalculateRadiance(payload, attribs);
}当使用长度大于1的路径(间接光不止一次)时,我的代码会出现问题。阴影开始消失,物体看起来不再接地。现在将出现一系列的图像。打开全尺寸,以更好的比较。在第一个图像中用红色标记的兴趣点。有关路径长度的信息可以在左边的GUI中看到。所有图像都由1024 spp生成。
这是我的渲染(1-4-8间接反弹按这个顺序):



如您所见,有关阴影的信息在增加路径长度的过程中丢失。现在,我尝试跳过skybox,所以任何遗漏的着色器都将返回(0,0,0)而不是skybox值。结果稍好(4,8次间接反弹):


这里引用了MJP上面提到的路径跟踪器(3-6路径长度,但计算方法不同)。在我的术语中,实际上是1-4路径长度)。没有镜面路径。看看你感兴趣的地方。阴影有很好的根底,你可以看到折痕并没有得到多少光:


下面是在MJP的路径跟踪器中创建的地面真相引用,其中包含5000 spp,8(用我的话说是- 6)路径长度:

发布于 2020-10-15 08:31:11
为了跟进我的问题并提供部分答案:首先,我要感谢灯泡提供了帮助我找到解决方案的评论。
这是一个基本的图像,用1024 spp来反射4次光:

这里的问题是每件事都很轻。为什么?因为我的HLSL代码的这一部分是基于MJP的路径跟踪器( https://github.com/TheRealMJP/DXRPathTracer )的,它假设如果path允许扩散和镜像,那么吞吐量*= 2.0。然而,镜面项只允许第一次反弹的概率为50%。我决定这个乘积是基于在他的引擎测试,它没有物理基础,所以我摆脱了它,得到了更好的结果。镜面/金属部分基本上是一样的,但是你可以看到背景中的暗影,在它们的位置上放置物体。

然而,我注意到吞吐量(throughput)并没有被沿路径累积。MJP的算法是先令吞吐量 = 0,然后吞吐量 += 漫反射/镜面BRDF。他没有像PBRT一书( http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Path_Tracing.html )中描述的那样沿路径累乘BRDF的结果。所以我在有效负载(payload)中加入了吞吐量变量。它以值(1.0, 1.0, 1.0)开始,并在路径的每个顶点处乘以BRDF,得到如下图像:

根据我的知识,从物理学的角度来看,最后的图像应该是最接近地面真理的。此外,我认为这是最接近我的参考,我已经创建了MJP的引擎,我觉得这是最令人愉快的。我知道这些缺陷。一个是对于许多样本(3000-5000 spp),漫射纹理开始有奇怪的对比,我还不知道原因。
另外,俄罗斯轮盘赌在我的渐进式路径追踪中效果不佳( http://www.pbr-book.org/3ed-2018/Monte_Carlo_Integration/Russian_Roulette_and_Splitting.html )--也许在对其进行优化以减少方差之后它才是值得的,但我担心它仍然会引入过多噪声,而速度提升却很小。在添加降噪器之后也许值得一试,但就目前而言,我认为这不是一个好主意。
https://computergraphics.stackexchange.com/questions/10302
复制相似问题