Hey guys, I can see from the documentation that it's possible to camera-map textures, but it doesn't say how. It would be great to have a screenshot showing how to hook it up.
cheers
camera mapping
Moderator: juanjgon
- davidgidali
- Posts: 1
- Joined: Mon Nov 07, 2016 4:53 am
Would love to see it done properly in Max. All I was able to do was have it use the active camera as the projector, but I need a separate (static, in my case) projection camera.
Thanks!
Yes, sorry. I'm still trying to figure out how to configure it. Some math has to be computed to get the camera transformation and projection parameters needed to configure the projection node, and it is not easy. I hope to have a solution soon.
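In outline, the math involved is the standard pinhole projection. A rough sketch, assuming Houdini-style film parameters (horizontal aperture and focal length, both in mm), an image resolution of $w \times h$ pixels, and a point $(x, y, z)$ already transformed into camera space:

$$\mathrm{fov}_y = 2\arctan\!\left(\frac{h\cdot\mathrm{aperture}}{2f\cdot w\cdot\mathrm{pixelAspect}}\right),\qquad u = \frac{1}{2} - \frac{x}{2\,z\tan(\mathrm{fov}_y/2)\,(w/h)},\qquad v = \frac{1}{2} - \frac{y}{2\,z\tan(\mathrm{fov}_y/2)}$$

with $(u, v)$ in 0..1, and the point first transformed by the inverse of the projection camera's transform.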
Thanks,
-Juanjo
- Nicolas_ts
- Posts: 1
- Joined: Mon Mar 12, 2018 4:44 pm
You can do camera mapping through the UV texture node. Choose the texture type "Perspective from camera" and select your camera. But this method doesn't solve the problem with flat environment textures.
I ended up writing my own camera projector OSL node for this. Hopefully it will help others too.

Code:
// Camera projector osl by Vicente Lucendo - https://vlucendo.com
shader CameraProjector
[[
    string label = "Perspective Camera Projector",
    string help = "<h3>Perspective Camera Projector</h3> Node to project from a perspective camera",
    string version = "0.1"
]]
(
    // inputs
    int projectBackface = 0 [[ string widget = "checkBox" ]],
    point position = point(0, 0, 10),
    vector rotation = vector(0, 0, 0),
    float width = 1280 [[ float min = 1, float max = 999999 ]],
    float height = 720 [[ float min = 1, float max = 999999 ]],
    float pixelAspect = 1.0 [[ float min = 0.0001, float max = 999999 ]],
    float focalLength = 50.0 [[ float min = 1, float max = 180 ]],
    float aperture = 41.4214 [[ float min = 1, float max = 180 ]],
    // output
    output point uvw = 0
)
{
    point meshPosition = P;
    vector offset = meshPosition - position;
    vector dir = normalize(offset);

    // don't project onto backfaces unless explicitly requested
    if (projectBackface == 0 && dot(transform("world", N), dir) > 0.0)
        return;

    // build the camera rotation as a matrix; rotation is assumed to be
    // given in degrees, in the default rotation order
    vector rotRad = radians(rotation);
    float a = cos(rotRad.x);
    float b = sin(rotRad.x);
    float c = cos(rotRad.y);
    float d = sin(rotRad.y);
    float e = cos(rotRad.z);
    float f = sin(rotRad.z);
    float ae = a * e;
    float af = a * f;
    float be = b * e;
    float bf = b * f;

    // create the inverted (world-to-camera) rotation matrix directly
    matrix rot = matrix (
        c * e,       c * f,       -d,    0,
        be * d - af, bf * d + ae, b * c, 0,
        ae * d + bf, af * d - be, a * c, 0,
        0,           0,           0,     1
    );

    // position of the shaded point in camera space
    point pos = transform(rot, offset);

    // Houdini-style vertical camera fov from the film aperture
    float apy = (height * aperture) / (width * pixelAspect);
    float fov = 2 * atan((apy / 2) / focalLength);

    // project the point onto the image plane
    float ff = 1.0 / tan(fov / 2);
    float aspect = width / height;
    ff /= pos.z;
    pos.x *= ff / aspect;
    pos.y *= ff;

    // change range from -1..1 to 0..1 (flipped)
    pos.x = 1.0 - (pos.x * 0.5 + 0.5);
    pos.y = 1.0 - (pos.y * 0.5 + 0.5);

    // don't project outside the camera frustum
    if (pos.x < 0.0 || pos.x > 1.0 || pos.y < 0.0 || pos.y > 1.0)
        return;

    uvw.x = pos.x;
    uvw.y = pos.y;
}
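As a rough usage note (not a definitive recipe, and node names may differ between plugin versions): save this as a .osl file and load it into an Octane OSL projection node if your plugin exposes one, or into an OSL texture wired into the UV/projection input of the image texture you want to project. Set position, rotation, width, height, focalLength and aperture to match your static projection camera rather than the render camera. Points outside the frustum (or on backfaces, with projectBackface off) are left at uvw = 0, so set the texture's border mode accordingly.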
