I am working on a raytracer in C++. So far I have been able to compute the lighting model from the diffuse, specular and ambient components. My problem appeared when I tried to add shadows to the scene: the scene gets really messed up:
My code is structured as follows.
Here is one of my derived classes, Triangle:
class Triangle: public SceneObject{
public:
    Triangle (vec3 a, vec3 b, vec3 c, Material mat){
        name = "Triangle";
        p0 = a;
        p1 = b;
        p2 = c;
        objectMaterial = mat;
        normal = normalize(cross(p0-p1, p0-p2));
    }

    //Möller-Trumbore algorithm
    bool intersect(Ray aRay, float &t, vec3 &hitPoint, vec3 &n){ //we will use ray-plane intersection and barycentric coords:
        bool returnValue = false;
        //first we need to get a t of intersection between the passed ray and the triangle
        vec3 v0v1 = p1-p0;
        vec3 v0v2 = p2-p0;
        vec3 pvec = cross(aRay.getDirection(), v0v2);
        float det = dot(v0v1, pvec);
        if ( det >= 1e-6 ){ // Only draw if not backfacing
            float invDet = 1/det;
            float u = dot(-p0, pvec) * invDet;
            // No intersection if u < 0 or u > 1
            if (u >= 0 && u <= 1) {
                vec3 qvec = cross(-p0, v0v1);
                float v = dot(aRay.getDirection(), qvec) * invDet;
                // No intersection if v < 0 or u + v > 1
                if (v >= 0 && (u + v) <= 1){
                    t = dot(v0v2, qvec) * invDet;
                    returnValue = true;
                    hitPoint = aRay.getOrigin() + (t*aRay.getDirection());
                    n = normal;
                    //calculated_Out_Colour = calculateOutputColour(normal, aRay, lightSource, objectMaterial, t, hitPoint);
                }
            }
        }
        return returnValue;
    }

private:
    vec3 p0;
    vec3 p1;
    vec3 p2;
    vec3 normal;
};
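For context, here is a rough, hypothetical sketch of the SceneObject base interface that the calls in this question appear to assume (virtual intersect, a stored Material exposed through getMaterial). The member names are inferred from the Triangle class above and the main loop below, not taken from the asker's actual source:

class SceneObject{
public:
    virtual ~SceneObject() {}
    // True when aRay hits the object; fills t, the hit point and the surface normal.
    virtual bool intersect(Ray aRay, float &t, vec3 &hitPoint, vec3 &n) = 0;
    Material getMaterial() const { return objectMaterial; }
protected:
    std::string name;
    Material objectMaterial;
};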
Here is my main loop, where I generate a ray for every pixel of the window and determine the pixel's colour and whether the hit position is in shadow:
for(int i = 0; i < imageBuffer.Height(); i++){
    for(int j = 0; j < imageBuffer.Width(); j++){
        float currentX = ((float)i-256);
        float currentY = ((float)j-256);
        //cout << currentX << ", " << currentY << ", " << currentZ << endl;
        //make a ray for this pixel (i,j)
        glm::vec3 rayDirection = glm::normalize(glm::vec3(currentX, currentY, -d));
        Ray currentRay(vec3(0,0,0), rayDirection);
        vec3 hitPoint;
        vec3 normalAtHit;
        float tnear = 999; // closest intersection, set to INFINITY to start with
        SceneObject* object = NULL;
        for (int k = 0; k < objects.size(); k++) {
            float t; // intersection to the current object if any
            if (objects[k]->intersect(currentRay, t, hitPoint, normalAtHit) && t < tnear) {
                object = objects[k].get();
                tnear = t;
                vec3 shadowRayDirection = normalize(light1.getLightOrigin()-hitPoint);
                Ray shadowRay(hitPoint+vec3(0.03, 0.03, 0.03), shadowRayDirection);
                float shadowT;
                vec3 shadowHitPoint;
                vec3 shadowN;
                for (int m = 0; m < objects.size(); ++m) {
                    if (objects[m]->intersect(shadowRay, shadowT, shadowHitPoint, shadowN)) {
                        imageBuffer.SetPixel(i, j, ambientColour*ambientIntensity);
                        break;
                    } else {
                        imageBuffer.SetPixel(i, j, calculateOutputColour(normalAtHit, currentRay, light1, objects[k]->getMaterial(), hitPoint));
                    }
                }
            }
        }
    }
}
Honestly, I am at a loss here and I don't know why this is happening. I tried using the algorithm described here, but it produces the same result shown in the image. For reference, this is what my scene looks like when I loop over the objects and check for shadows:
I would appreciate any help debugging this. Thanks.
Best Answer
Cornstalks is right, but one important point: if you use hitPoint + 0.03 * normalAtHit, you need to make sure that normal is the one facing the incoming ray. From what we can see in your code, you only ever take the primitive's stored normal, but every face has two normals (one the opposite of the other).
Depending on the case, your shadow ray may therefore start on the wrong side of the surface, which can produce invalid shadows.
To handle this, check the dot product of the incoming ray direction and the normal; from its sign (greater or less than zero) you will know whether the normal needs to be flipped.
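As a minimal sketch of that check, reusing the names from the question (hitPoint, normalAtHit, currentRay, light1) and assuming GLM-style vec3, dot and normalize, the shadow-ray construction could look like this:

// Flip the normal so it faces the incoming ray before offsetting the shadow origin.
vec3 facingNormal = normalAtHit;
if (dot(currentRay.getDirection(), facingNormal) > 0.0f) {
    facingNormal = -facingNormal; // the ray hit the back side, so use the opposite normal
}
const float bias = 1e-3f; // small offset to avoid self-intersection ("shadow acne")
vec3 shadowOrigin = hitPoint + bias * facingNormal;
vec3 shadowRayDirection = normalize(light1.getLightOrigin() - shadowOrigin);
Ray shadowRay(shadowOrigin, shadowRayDirection);

Offsetting along the facing normal, instead of adding a fixed vec3(0.03, 0.03, 0.03), keeps the shadow origin on the correct side of the surface regardless of the triangle's orientation.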
Regarding c++ - Unexpected results when calculating shadows in a raytracer, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/36255098/