34
loading...
This website collects cookies to deliver better user experience
We use requestAnimationFrame
to sync the frame up to the refresh rate of the browser. Every call to this.render()
will be removed, with the sole exception of the one in connectedCallback,
which will bootstrap the draw loop. We'll make a new method called renderLoop
that will call render
in a requestAnimationFrame loop.async connectedCallback() {
//Bootstraps the component: shadow DOM, event wiring, GPU setup, scene objects.
//Order matters: the GPU context must exist before cameras/meshes/lights/textures.
this.createShadowDom();
this.cacheDom();
this.attachEvents();
await this.bootGpu();
this.createCameras();
this.createMeshes();
this.createLights();
await this.loadTextures();
//Kick off the continuous draw loop; from here on rendering is frame-driven.
this.renderLoop();
}
renderLoop(){
requestAnimationFrame(() => {
this.render();
this.renderLoop();
});
}
Why wrap the call in a lambda instead of passing render
directly? The main reason is flexibility. render
is still only responsible for 1 frame. We're also likely to add new timing-dependent information, like the current time for animation, and it makes sense to have this as an input to render
.//data.js
//A unit quad centered on the origin in the XY plane (z = 0),
//listed counter-clockwise starting from the bottom-left vertex.
export const quad = {
	positions: [
		-0.5, -0.5, 0, //bottom-left
		0.5, -0.5, 0,  //bottom-right
		0.5, 0.5, 0,   //top-right
		-0.5, 0.5, 0   //top-left
	],
	//Solid red at every vertex.
	colors: [
		1, 0, 0,
		1, 0, 0,
		1, 0, 0,
		1, 0, 0
	],
	//Texture coordinates; v runs opposite to position y.
	uvs: [
		0, 1,
		1, 1,
		1, 0,
		0, 0
	],
	//Every vertex normal points along -Z.
	normals: [
		0, 0, -1,
		0, 0, -1,
		0, 0, -1,
		0, 0, -1
	],
	//Two triangles (index triples into positions) cover the quad.
	triangles: [
		0, 1, 2,
		0, 2, 3
	],
	textureName: "grass"
}
bindNormals(normals) {
const normalsBuffer = this.context.createBuffer();
this.context.bindBuffer(this.context.ARRAY_BUFFER, normalsBuffer);
this.context.bufferData(this.context.ARRAY_BUFFER, normals, this.context.STATIC_DRAW);
const vertexNormalLocation = this.context.getAttribLocation(this.program, "aVertexNormal");
this.context.enableVertexAttribArray(vertexNormalLocation);
this.context.vertexAttribPointer(vertexNormalLocation, 3, this.context.FLOAT, false, 0, 0);
}
Instead of the lib
folder, I'm going to start using a new folder called "Actor", which represents things that make up a scene, like a Mesh
, a Camera
, and now a Light
export class Light {
	#type;
	#position;
	#direction;
	#color;
	/**
	 * @param {{type?: string, position?: number[], direction?: number[], color?: number[]}} light
	 */
	constructor(light){
		this.#type = light.type ?? "directional";
		this.#position = light.position ?? [0,0,0];
		//Default to a zero vector so lights constructed without a direction
		//(e.g. point lights, which don't need one) don't make getInfoMatrix
		//throw on invertVector(undefined).
		this.#direction = light.direction ?? [0,0,0];
		this.#color = light.color ?? [1,1,1,1];
	}
	/**
	 * Packs the light into a mat4-shaped Float32Array for a shader uniform.
	 * Row 0 = position, row 1 = inverted direction, row 2 = color,
	 * row 3 = metadata (last element: 0 = directional, 1 = point).
	 * @returns {Float32Array} 16-element column data for uniformMatrix4fv
	 */
	getInfoMatrix(){
		return new Float32Array([
			...this.#position, 1,
			...invertVector(this.#direction), 1,
			...this.#color,
			0, 0, 0, this.#type === "directional" ? 0 : 1
		])
	}
}
I use a mat4
to send it to the shader since it allows me to cram everything into it. The first row will be the position, the second the direction, the third the color, and the final row is some metadata. We won't have many lights in a scene — at least with the algorithms I'm going with, as it'll be quite expensive — so we can just use global uniforms as fixed-name variables. #direction
is inverted. This is because we'll be using dot-products between the normal and the light direction, but for that to work correctly they need to point the same way; what we're really looking at is the light bouncing off the surface into the camera.//vector.js
//Returns a new vector with every component negated; the input is untouched.
export function invertVector(vec){
	const flipped = [];
	for (const component of vec) {
		flipped.push(-component);
	}
	return flipped;
}
setupGlobalUniforms(){
const projectionMatrix = this.cameras.default.getProjectionMatrix();
const projectionLocation = this.context.getUniformLocation(this.program, "uProjectionMatrix");
this.context.uniformMatrix4fv(projectionLocation, false, projectionMatrix);
const viewMatrix = this.cameras.default.getViewMatrix();
const viewLocation = this.context.getUniformLocation(this.program, "uViewMatrix");
this.context.uniformMatrix4fv(viewLocation, false, viewMatrix);
const light1Matrix = this.lights[0].getInfoMatrix();
const light1Location = this.context.getUniformLocation(this.program, "uLight1");
this.context.uniformMatrix4fv(light1Location, false, light1Matrix);
}
uniform mat4 uProjectionMatrix;
uniform mat4 uModelMatrix;
uniform mat4 uViewMatrix;
attribute vec3 aVertexPosition;
attribute vec3 aVertexColor;
attribute vec2 aVertexUV;
attribute vec3 aVertexNormal;
varying mediump vec4 vColor;
varying mediump vec2 vUV;
varying mediump vec3 vNormal;
void main(){
	gl_Position = uProjectionMatrix * uViewMatrix * uModelMatrix * vec4(aVertexPosition, 1.0);
	vColor = vec4(aVertexColor, 1.0);
	vUV = aVertexUV;
	//w = 0.0 so the model matrix rotates/scales the normal but does not
	//translate it - normals are directions, not points. (For non-uniform
	//scale the inverse-transpose of the model matrix would be needed.)
	vNormal = vec3(uModelMatrix * vec4(aVertexNormal, 0.0));
}
varying lowp vec4 vColor;
varying lowp vec2 vUV;
varying lowp vec3 vNormal;
uniform lowp mat4 uLight1;
uniform sampler2D uSampler;
void main() {
//Texture sampling kept for reference; output is the interpolated vertex color.
//gl_FragColor = texture2D(uSampler, vUV);
gl_FragColor = vColor;
}
createLights(){
this.lights = [
new Light({
type: "directional",
direction: [0,0,1],
color: [1,1,1,1]
})
]
}
connectedCallback
.//fragment shader
//definitions....
void main() {
//Debug view: paint every pixel with row 1 of the light matrix
//(the inverted light direction) to confirm the uniform arrived.
gl_FragColor = uLight1[1];
}
void main() {
//Lambertian term: brightness from the angle between surface normal and light.
//NOTE(review): vNormal should be normalized here - interpolated varyings lose
//unit length (the article adds normalize() in a later revision).
mediump float light = dot(vNormal, uLight1[1].xyz);
gl_FragColor = vec4(light, light, light, 1);
}
//fragment shader
void main() {
//Tint the lambertian term with the interpolated vertex color.
//NOTE(review): vNormal is not normalized here; the later revision fixes this.
mediump float light = dot(vNormal, uLight1[1].xyz);
gl_FragColor = vColor * vec4(light, light, light, 1);
}
Next, change the light's type from spot
to point
and give it a position. But we do need to add some stuff to the vertex shader:uniform mat4 uProjectionMatrix;
uniform mat4 uModelMatrix;
uniform mat4 uViewMatrix;
attribute vec3 aVertexPosition;
attribute vec3 aVertexColor;
attribute vec2 aVertexUV;
attribute vec3 aVertexNormal;
varying mediump vec4 vColor;
varying mediump vec2 vUV;
varying mediump vec3 vNormal;
varying mediump vec3 vPosition;
void main(){
	gl_Position = uProjectionMatrix * uViewMatrix * uModelMatrix * vec4(aVertexPosition, 1.0);
	vUV = aVertexUV;
	vColor = vec4(aVertexColor, 1.0);
	//w = 0.0: transform the normal as a direction (no translation applied).
	vNormal = vec3(uModelMatrix * vec4(aVertexNormal, 0.0));
	//w = 1.0: transform the position as a point into world space.
	vPosition = vec3(uModelMatrix * vec4(aVertexPosition, 1.0));
}
vPosition
. This happens after the model has been moved around but before the camera transforms happen and it represents the world space coordinates of the pixel. We can use this in the fragment shader:varying mediump vec4 vColor;
varying mediump vec2 vUV;
varying mediump vec3 vNormal;
varying mediump vec3 vPosition;
uniform lowp mat4 uLight1;
uniform sampler2D uSampler;
void main() {
	//The last element of the light matrix flags the type (1.0 = point).
	bool isPoint = uLight1[3][3] == 1.0;
	if(isPoint){
		//point light + color: direction varies per fragment, surface -> light
		mediump vec3 toLight = normalize(uLight1[0].xyz - vPosition);
		mediump float light = dot(normalize(vNormal), toLight);
		gl_FragColor = vColor * vec4(light, light, light, 1);
	} else {
		//directional light + color: constant direction from row 1
		mediump float light = dot(normalize(vNormal), uLight1[1].xyz);
		gl_FragColor = vColor * vec4(light, light, light, 1);
	}
}
To get the point light's direction we subtract the light position from the vPosition
vector and then we do the same calculation. It's important that we normalize things here too. I forgot to do that last time, but the interpolated values are not normalized because WebGL has no clue how they are used, so we need to remember to do it ourselves. Failure to do so will cause things to be brighter or dimmer than you expect. The light's color should also tint the surface: if we have a white object [1,1,1]
and we cast green light [0,1,0]
, we want it to be green. But if we have a red object [1,0,0]
and cast green light then it should be black because the object doesn't reflect back green. Using a white light should retain all colors of the object.void main() {
//uLight1 rows: 0 = position, 1 = inverted direction, 2 = color, 3 = metadata.
bool isPoint = uLight1[3][3] == 1.0;
if(isPoint){
//point light + color: per-fragment direction from the surface to the light
mediump vec3 toLight = normalize(uLight1[0].xyz - vPosition);
mediump float light = dot(normalize(vNormal), toLight);
gl_FragColor = vColor * uLight1[2] * vec4(light, light, light, 1);
} else {
//directional light + color: constant light direction from row 1
mediump float light = dot(normalize(vNormal), uLight1[1].xyz);
gl_FragColor = vColor * uLight1[2] * vec4(light, light, light, 1);
}
}
Here's the quad colored [1,0.5,1]
with a yellow [1,1,0]
light on it: