Hi there, I have been having issues in calculating world to screen, below is the sample code I have come up with so far,
Please can someone point me in the right direction because I can't find where my calculations are going wrong.
Sample Result / Output below, showing incorrect X,Y co-ords.
I think the issue is with my assumption that viewProjMatrix = the camera matrix position, because I should also be taking the camera position and field of view into account, but no idea how to do this.
I think it should be viewProjMatrix = some calculation on (camera position) and (matrix position) and (field of view)
Please can someone point me in the right direction because I can't find where my calculations are going wrong.
Code:
Vector2 xy = WorldSpaceToScreenSpace(new Vector3 { X = wow.LocalPlayer.PlayerX, Y = wow.LocalPlayer.PlayerY, Z = wow.LocalPlayer.PlayerZ });
log.LogActivity("Result (x, y) = " + xy.X + ", " + xy.Y);
Code:
Vector2 WorldSpaceToScreenSpace(Vector3 worldSpacePos)
{
Vector3 viewProjMatrix = new Vector3
{
X = wow.CameraMatrixX,
Y = wow.CameraMatrixY,
Z = wow.CameraMatrixZ
};
Vector4 clipSpacePos = Vector4.Transform(new Vector4(worldSpacePos, 1f), viewProjMatrix, log);
Vector2 screenSpacePos;
screenSpacePos.X = (clipSpacePos.X / clipSpacePos.W) * 0.5f + 0.5f;
screenSpacePos.Y = 1.0f - ((clipSpacePos.Y / clipSpacePos.W) * 0.5f + 0.5f);
screenSpacePos.X *= 1920;
screenSpacePos.Y *= 1080;
return screenSpacePos;
}
Code:
private struct Vector2
{
public float X;
public float Y;
}
private struct Vector3
{
public float X;
public float Y;
public float Z;
}
private class Vector4
{
public float W;
public float X;
public float Y;
public float Z;
public Vector4()
{
}
public Vector4(Vector3 worldSpacePosition, float w)
{
X = worldSpacePosition.X;
Y = worldSpacePosition.Y;
Z = worldSpacePosition.Z;
W = w;
}
public static Vector4 Transform(Vector4 worldSpace, Vector3 viewProjMatrix, Logging log)
{
// http://www.opengl-tutorial.org/beginners-tutorials/tutorial-3-matrices/#The_Model__View_and_Projection_matrices
float[][] ws = new float[4][];
ws[0] = new float[4];
ws[1] = new float[4];
ws[2] = new float[4];
ws[3] = new float[4];
ws[0][0] = 1f; ws[0][1] = 0f; ws[0][2] = 0f; ws[0][3] = worldSpace.X;
ws[1][0] = 0f; ws[1][1] = 1f; ws[1][2] = 0f; ws[1][3] = worldSpace.Y;
ws[2][0] = 0f; ws[2][1] = 0f; ws[2][2] = 1f; ws[2][3] = worldSpace.Z;
ws[3][0] = 0f; ws[3][1] = 0f; ws[3][2] = 0f; ws[3][3] = 1f;
log.LogActivityNoTime(string.Format("World Space Matrix\r\n" +
"{0}\t{1}\t{2}\t{3}\r\n" +
"{4}\t{5}\t{6}\t{7}\r\n" +
"{8}\t{9}\t{10}\t{11}\r\n" +
"{12}\t{13}\t{14}\t{15}\r\n",
ws[0][0], ws[0][1], ws[0][2], ws[0][3],
ws[1][0], ws[1][1], ws[1][2], ws[1][3],
ws[2][0], ws[2][1], ws[2][2], ws[2][3],
ws[3][0], ws[3][1], ws[3][2], ws[3][3]));
float[][] vp = new float[4][];
vp[0] = new float[1];
vp[1] = new float[1];
vp[2] = new float[1];
vp[3] = new float[1];
vp[0][0] = viewProjMatrix.X;
vp[1][0] = viewProjMatrix.Y;
vp[2][0] = viewProjMatrix.Z;
vp[3][0] = 1f;
log.LogActivityNoTime(string.Format("View Proj Matrix\r\n" +
"{0}\r\n" +
"{1}\r\n" +
"{2}\r\n" +
"{3}", vp[0][0], vp[1][0], vp[2][0], vp[3][0]));
float[][] res = new float[4][];
res[0] = new float[1];
res[1] = new float[1];
res[2] = new float[1];
res[3] = new float[1];
res[0][0] = (ws[0][0] * vp[0][0]) + (ws[0][1] * vp[1][0]) + (ws[0][2] * vp[2][0]) + (ws[0][3] * vp[3][0]);
res[1][0] = (ws[1][0] * vp[0][0]) + (ws[1][1] * vp[1][0]) + (ws[1][2] * vp[2][0]) + (ws[1][3] * vp[3][0]);
res[2][0] = (ws[2][0] * vp[0][0]) + (ws[2][1] * vp[1][0]) + (ws[2][2] * vp[2][0]) + (ws[2][3] * vp[3][0]);
res[3][0] = (ws[3][0] * vp[0][0]) + (ws[3][1] * vp[1][0]) + (ws[3][2] * vp[2][0]) + (ws[3][3] * vp[3][0]);
return new Vector4
{
X = res[0][0],
Y = res[1][0],
Z = res[2][0],
W = res[3][0]
};
}
}
I think the issue is with my assumption that viewProjMatrix = the camera matrix position, because I should also be taking the camera position and field of view into account, but no idea how to do this.
I think it should be viewProjMatrix = some calculation on (camera position) and (matrix position) and (field of view)