diff --git a/Capture.JPG b/Capture.JPG
new file mode 100644
index 0000000..4fa30c2
Binary files /dev/null and b/Capture.JPG differ
diff --git a/README.md b/README.md
index c636328..56860f0 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,13 @@

(source: Ken Perlin)

+## Demo
+![screenshot](capture2.JPG)
+I created five biomes: ocean, plains, mountains, alpine, and cities. The ocean is animated with fbm so that it simulates waves. Cities are generated with Perlin noise and only light up on the night side. Biomes are placed with fbm and elevated by the same noise. For toolbox functions, I used bias and gain, sine, mix, and a triangle wave to shape the planet.
+Users can control the ambient light, the deformation level, and which reflection model is used to render the planet.
+Link: https://effieyanfei.github.io/hw00-webgl-intro/
+
+
 ## Objective
 - Check that the tools and build configuration we will be using for the class works.
 - Start learning Typescript and WebGL2
@@ -75,3 +82,5 @@ To check if everything is on the right track:
 - Interfaces https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API
 - Types https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Types
 - Constants https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Constants
+
+
diff --git a/capture2.JPG b/capture2.JPG
new file mode 100644
index 0000000..5da2b36
Binary files /dev/null and b/capture2.JPG differ
diff --git a/src/.vs/ProjectSettings.json b/src/.vs/ProjectSettings.json
new file mode 100644
index 0000000..f8b4888
--- /dev/null
+++ b/src/.vs/ProjectSettings.json
@@ -0,0 +1,3 @@
+{
+  "CurrentProjectSetting": null
+}
\ No newline at end of file
diff --git a/src/.vs/VSWorkspaceState.json b/src/.vs/VSWorkspaceState.json
new file mode 100644
index 0000000..6b61141
--- /dev/null
+++ b/src/.vs/VSWorkspaceState.json
@@ -0,0 +1,6 @@
+{
+  "ExpandedNodes": [
+    ""
+  ],
+  "PreviewInSolutionExplorer": false
+}
\ No newline at end of file
diff --git a/src/.vs/slnx.sqlite b/src/.vs/slnx.sqlite
new file mode 100644
index 0000000..86a0f48
Binary files /dev/null and b/src/.vs/slnx.sqlite differ
diff --git a/src/.vs/src/v15/.suo b/src/.vs/src/v15/.suo
new file mode 100644
index 0000000..3fcd18d
Binary files /dev/null and b/src/.vs/src/v15/.suo differ
diff --git a/src/geometry/Cube.ts b/src/geometry/Cube.ts
new file mode 100644
index 0000000..fa45161
--- /dev/null
+++ b/src/geometry/Cube.ts
@@ -0,0 +1,103 @@
+import {vec3, vec4} from 'gl-matrix';
+import Drawable from '../rendering/gl/Drawable';
+import {gl} from '../globals';
+
+class Cube extends Drawable {
+  indices: Uint32Array;
+  positions: Float32Array;
+  normals: Float32Array;
+  center: vec4;
+
+  constructor(center: vec3) {
+    super(); // Call the constructor of the super class. This is required.
+ this.center = vec4.fromValues(center[0], center[1], center[2], 1); + } + + create() { + + this.indices = new Uint32Array([0, 1, 2, + 0, 2, 3, + 4, 5, 6, + 4, 6, 7, + 8, 9, 10, + 8, 10, 11, + 12, 13, 14, + 12, 14, 15, + 16, 17, 18, + 16, 18, 19, + 20, 21, 22, + 20, 22, 23 + ]); + this.normals = new Float32Array([0, 0, 1, 0, + 0, 0, 1, 0, + 0, 0, 1, 0, + 0, 0, 1, 0, + 0, 1, 0, 0, + 0, 1, 0, 0, + 0, 1, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 1, 0, + 0, 0, 1, 0, + 0, 0, 1, 0, + 0, 1, 0, 0, + 0, 1, 0, 0, + 0, 1, 0, 0, + 0, 1, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0, + 1, 0, 0, 0 + ]); + this.positions = new Float32Array([-1, -1, 1, 1, + 1, -1, 1, 1, + 1, 1, 1, 1, + -1, 1, 1, 1, + -1, 1, 1, 1, + 1, 1, 1, 1, + 1, 1, -1, 1, + -1, 1, -1, 1, + + -1, -1, -1, 1, + 1, -1, -1, 1, + 1, 1, -1, 1, + -1, 1, -1, 1, + -1, -1, 1, 1, + 1, -1, 1, 1, + 1, -1, -1, 1, + -1, -1, -1, 1, + + -1, -1, -1, 1, + -1, -1, 1, 1, + -1, 1, 1, 1, + -1, 1, -1, 1, + + 1, -1, -1, 1, + 1, -1, 1, 1, + 1, 1, 1, 1, + 1, 1, -1, 1 + ]); + + this.generateIdx(); + this.generatePos(); + this.generateNor(); + + this.count = this.indices.length; + gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.bufIdx); + gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, this.indices, gl.STATIC_DRAW); + + gl.bindBuffer(gl.ARRAY_BUFFER, this.bufNor); + gl.bufferData(gl.ARRAY_BUFFER, this.normals, gl.STATIC_DRAW); + + gl.bindBuffer(gl.ARRAY_BUFFER, this.bufPos); + gl.bufferData(gl.ARRAY_BUFFER, this.positions, gl.STATIC_DRAW); + + console.log(`Created cube`); + } +}; + +export default Cube; diff --git a/src/main.ts b/src/main.ts index 65a9461..949e8af 100644 --- a/src/main.ts +++ b/src/main.ts @@ -1,8 +1,10 @@ import {vec3} from 'gl-matrix'; +import {vec4} from 'gl-matrix'; const Stats = require('stats-js'); import * as DAT from 'dat.gui'; import Icosphere from './geometry/Icosphere'; import Square from './geometry/Square'; +import Cube from './geometry/Cube'; import OpenGLRenderer from './rendering/gl/OpenGLRenderer'; import Camera from './Camera'; import {setGL} from './globals'; @@ -13,17 +15,49 @@ import ShaderProgram, {Shader} from './rendering/gl/ShaderProgram'; const controls = { tesselations: 5, 'Load Scene': loadScene, // A function pointer, essentially + 'ambient light': 2, + 'deform': 0, + 'lambert': lambert, + 'gradient': gradient, + 'lit': lit, +}; + + + +var palette = { + color1: [0, 128, 255], // RGB array }; let icosphere: Icosphere; let square: Square; +let cube: Cube; let prevTesselations: number = 5; +let r: number = 0; +let g: number = 0; +let b: number = 0; +let t: number = 0; +let increase: boolean = true; +let reflectionModel: number = 0; + +function lambert() { + reflectionModel = 0; +} + +function gradient() { + reflectionModel = 1; +} + +function lit() { + reflectionModel = 2; +} function loadScene() { icosphere = new Icosphere(vec3.fromValues(0, 0, 0), 1, controls.tesselations); icosphere.create(); - square = new Square(vec3.fromValues(0, 0, 0)); - square.create(); + //square = new Square(vec3.fromValues(0, 0, 0)); + //square.create(); + //cube = new Cube(vec3.fromValues(0, 0, 0)); + //cube.create(); } function main() { @@ -38,7 +72,17 @@ function main() { // Add controls to the gui const gui = new DAT.GUI(); gui.add(controls, 'tesselations', 0, 8).step(1); + gui.add(controls, 'lambert'); + gui.add(controls, 'gradient'); + gui.add(controls, 'lit'); gui.add(controls, 'Load Scene'); + gui.add(controls, 'ambient light', 0, 10).step(1); + gui.add(controls, 'deform', 0, 10).step(1); + + 
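For reference, these are the outward face normals of an axis-aligned cube, in the same face order as the position array in Cube.ts above (front, top, back, bottom, left, right). The normals array above appears to repeat +Z, +Y and +X for faces whose outward normals would be -Z, -Y and -X; that is harmless while the cube stays commented out in loadScene, but worth noting. A minimal TypeScript sketch (helper name hypothetical, not part of this diff):

```ts
// Expected outward normals per face, in the face order used by Cube.ts:
// front (+Z), top (+Y), back (-Z), bottom (-Y), left (-X), right (+X).
const FACE_NORMALS: [number, number, number][] = [
  [0, 0, 1],
  [0, 1, 0],
  [0, 0, -1],
  [0, -1, 0],
  [-1, 0, 0],
  [1, 0, 0],
];

function buildCubeNormals(): Float32Array {
  const out: number[] = [];
  for (const [x, y, z] of FACE_NORMALS) {
    for (let v = 0; v < 4; v++) {
      out.push(x, y, z, 0); // four identical normals per face; w = 0 for directions
    }
  }
  return new Float32Array(out); // 6 faces * 4 vertices * 4 components = 96 floats
}
```

Each face contributes four identical normals with a zero w component, matching the four-floats-per-normal layout the array above already uses.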
// Add color controller + //var colorController = gui.addColor(palette, 'color1'); + //colorChange(); + //colorController.onFinishChange(colorChange); // get canvas and webgl context const canvas = document.getElementById('canvas'); @@ -64,26 +108,53 @@ function main() { new Shader(gl.FRAGMENT_SHADER, require('./shaders/lambert-frag.glsl')), ]); + const custom = new ShaderProgram([ + new Shader(gl.VERTEX_SHADER, require('./shaders/custom-vert.glsl')), + new Shader(gl.FRAGMENT_SHADER, require('./shaders/custom-frag.glsl')), + ]); + + const planetLambert = new ShaderProgram([ + new Shader(gl.VERTEX_SHADER, require('./shaders/planet-vert.glsl')), + new Shader(gl.FRAGMENT_SHADER, require('./shaders/planet-frag.glsl')), +]); + + + + /*function colorChange() { + var newColor = colorController.getValue(); + r = newColor[0] / 255; + g = newColor[1] / 255; + b = newColor[2] / 255; + } + */ + // This function will be called every frame function tick() { camera.update(); stats.begin(); gl.viewport(0, 0, window.innerWidth, window.innerHeight); renderer.clear(); + //var color = vec4.fromValues(r, g, b, 1); + var color = vec4.fromValues(116 / 255, 184 / 255, 121 / 255, 1); if(controls.tesselations != prevTesselations) { prevTesselations = controls.tesselations; icosphere = new Icosphere(vec3.fromValues(0, 0, 0), 1, prevTesselations); icosphere.create(); } - renderer.render(camera, lambert, [ - icosphere, - // square, - ]); + renderer.render(camera, planetLambert, [ + icosphere, + //cube, + //square, + ], color, t, controls['ambient light'], reflectionModel, controls.deform); + + //change t very tick + t = t + 1; stats.end(); // Tell the browser to call `tick` again whenever it renders a new frame requestAnimationFrame(tick); + } window.addEventListener('resize', function() { diff --git a/src/rendering/gl/OpenGLRenderer.ts b/src/rendering/gl/OpenGLRenderer.ts index 7e527c2..0f31a58 100644 --- a/src/rendering/gl/OpenGLRenderer.ts +++ b/src/rendering/gl/OpenGLRenderer.ts @@ -22,16 +22,19 @@ class OpenGLRenderer { gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT); } - render(camera: Camera, prog: ShaderProgram, drawables: Array) { + render(camera: Camera, prog: ShaderProgram, drawables: Array, color: vec4, time: number, ambient: number, mode: number, deform: number) { let model = mat4.create(); let viewProj = mat4.create(); - let color = vec4.fromValues(1, 0, 0, 1); mat4.identity(model); mat4.multiply(viewProj, camera.projectionMatrix, camera.viewMatrix); prog.setModelMatrix(model); prog.setViewProjMatrix(viewProj); prog.setGeometryColor(color); + prog.setTime(time); + prog.setAmbient(ambient); + prog.setDeform(deform); + prog.setMode(mode); for (let drawable of drawables) { prog.draw(drawable); diff --git a/src/rendering/gl/ShaderProgram.ts b/src/rendering/gl/ShaderProgram.ts index 67fef40..c0eb785 100644 --- a/src/rendering/gl/ShaderProgram.ts +++ b/src/rendering/gl/ShaderProgram.ts @@ -29,6 +29,11 @@ class ShaderProgram { unifModelInvTr: WebGLUniformLocation; unifViewProj: WebGLUniformLocation; unifColor: WebGLUniformLocation; + unifTime: WebGLUniformLocation; + unifMode: WebGLUniformLocation; + unifAmbient: WebGLUniformLocation; + unifDeform: WebGLUniformLocation; + constructor(shaders: Array) { this.prog = gl.createProgram(); @@ -44,10 +49,15 @@ class ShaderProgram { this.attrPos = gl.getAttribLocation(this.prog, "vs_Pos"); this.attrNor = gl.getAttribLocation(this.prog, "vs_Nor"); this.attrCol = gl.getAttribLocation(this.prog, "vs_Col"); + this.unifModel = gl.getUniformLocation(this.prog, "u_Model"); 
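A note on the per-frame uniforms (u_Time, u_Ambient, u_Deform, u_Mode) wired up in ShaderProgram below: the new setters reuse the template's `!== -1` guard, but `gl.getUniformLocation` returns `null` (not -1) when a uniform is missing or has been optimized out, so a null check is the more reliable guard. A minimal TypeScript sketch of that pattern (function name hypothetical, not part of this diff):

```ts
// Sketch: guard integer uniform uploads with a null check, since
// getUniformLocation returns null for uniforms the linker dropped.
function setIntUniform(
  gl: WebGL2RenderingContext,
  prog: WebGLProgram,
  name: string,
  value: number
): void {
  const loc: WebGLUniformLocation | null = gl.getUniformLocation(prog, name);
  if (loc !== null) {
    gl.uniform1i(loc, value);
  }
}
```

Usage would look like `setIntUniform(gl, this.prog, "u_Time", t)` inside the setters this diff adds; the existing `!== -1` checks simply never fire.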
this.unifModelInvTr = gl.getUniformLocation(this.prog, "u_ModelInvTr"); this.unifViewProj = gl.getUniformLocation(this.prog, "u_ViewProj"); - this.unifColor = gl.getUniformLocation(this.prog, "u_Color"); + this.unifColor = gl.getUniformLocation(this.prog, "u_Color"); + this.unifTime = gl.getUniformLocation(this.prog, "u_Time"); + this.unifAmbient = gl.getUniformLocation(this.prog, "u_Ambient"); + this.unifDeform = gl.getUniformLocation(this.prog, "u_Deform"); + this.unifMode = gl.getUniformLocation(this.prog, "u_Mode"); } use() { @@ -85,6 +95,34 @@ class ShaderProgram { } } + setTime(time: number) { + this.use(); + if (this.unifTime !== -1) { + gl.uniform1i(this.unifTime, time); + } + } + + setMode(mode: number) { + this.use(); + if (this.unifMode !== -1) { + gl.uniform1i(this.unifMode, mode); + } + } + + setAmbient(ambient: number) { + this.use(); + if (this.unifAmbient !== -1) { + gl.uniform1i(this.unifAmbient, ambient); + } + } + + setDeform(deform: number) { + this.use(); + if (this.unifDeform !== -1) { + gl.uniform1i(this.unifDeform, deform); + } + } + draw(d: Drawable) { this.use(); diff --git a/src/shaders/custom-frag.glsl b/src/shaders/custom-frag.glsl new file mode 100644 index 0000000..df66291 --- /dev/null +++ b/src/shaders/custom-frag.glsl @@ -0,0 +1,91 @@ +#version 300 es + +// This is a fragment shader. If you've opened this file first, please +// open and read lambert.vert.glsl before reading on. +// Unlike the vertex shader, the fragment shader actually does compute +// the shading of geometry. For every pixel in your program's output +// screen, the fragment shader is run for every bit of geometry that +// particular pixel overlaps. By implicitly interpolating the position +// data passed into the fragment shader by the vertex shader, the fragment shader +// can compute what color to apply to its pixel based on things like vertex +// position, light position, and vertex color. +precision highp float; + +uniform vec4 u_Color; // The color with which to render this instance of geometry. + +uniform highp int u_Time; + +// These are the interpolated values out of the rasterizer, so you can't know +// their specific values without knowing the vertices that contributed to them +in vec4 fs_Nor; +in vec4 fs_LightVec; +in vec4 fs_Col; +in vec4 fs_Pos; + + +out vec4 out_Col; // This is the final output color that you will see on your + // screen for the pixel that is currently being processed. +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 456.9)), + dot(p,vec3(269.5, 183.3, 236.6)), + dot(p, vec3(420.6, 631.2, 235.1)) + )) * 438648.5453); +} +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow5 = vec3(pow(t2.x, 5.0), pow(t2.y, 5.0), pow(t2.z, 5.0)); + vec3 pow4 = vec3(pow(t2.x, 4.0), pow(t2.y, 4.0), pow(t2.z, 4.0)); + vec3 pow3 = vec3(pow(t2.x, 3.0), pow(t2.y, 3.0), pow(t2.z, 3.0)); + vec3 t = vec3(1.f, 1.f, 1.f) - 6.f * pow5 + 15.f * pow4 - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient1 = random3(gridPoint) * 2. - vec3(1., 1., 1.); + vec3 gradient2 = random3(gridPoint) * 3. 
- vec3(1., 1., 1.); + vec3 gradient = gradient1 + sin(float(u_Time) * 0.02) * (gradient2 - gradient1); + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + // Get the value of our height field by dotting grid->P with our gradient + float height = dot(diff, gradient); + //Scale our height field (i.e. reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for(int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum; +} + +void main() +{ + // Material base color (before shading) + vec4 diffuseColor = u_Color; + + // Calculate the diffuse term for Lambert shading + float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + // Avoid negative lighting values + // diffuseTerm = clamp(diffuseTerm, 0, 1); + + float ambientTerm = 0.2; + + float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier + //to simulate ambient lighting. This ensures that faces that are not + //lit by our point light are not completely black. + + // Compute final shaded color + float n1 = perlinNoise3D(fs_Pos.xyz * 20.0); + float n2 = perlinNoise3D(fs_Pos.xyz * 5.0); + vec4 layer = 0.4 * vec4(n1, n1, n1, 1.0) + 0.6 * vec4(n2, n2, n2, 1.0); + vec4 layer2 = 0.5 * diffuseColor / vec4(n1, n1, n1, 1.0) + 0.5 * diffuseColor / vec4(n2, n2, n2, 1.0); + diffuseColor = diffuseColor + diffuseColor * layer + 0.5 * diffuseColor * layer2 * layer2 / 100.0; + out_Col = vec4(diffuseColor.rgb * lightIntensity, diffuseColor.a); +} diff --git a/src/shaders/custom-vert.glsl b/src/shaders/custom-vert.glsl new file mode 100644 index 0000000..1f69dfb --- /dev/null +++ b/src/shaders/custom-vert.glsl @@ -0,0 +1,59 @@ +#version 300 es + +//This is a vertex shader. While it is called a "shader" due to outdated conventions, this file +//is used to apply matrix transformations to the arrays of vertex data passed to it. +//Since this code is run on your GPU, each vertex is transformed simultaneously. +//If it were run on your CPU, each vertex would have to be processed in a FOR loop, one at a time. +//This simultaneous transformation allows your program to run much faster, especially when rendering +//geometry with millions of vertices. + +uniform mat4 u_Model; // The matrix that defines the transformation of the + // object we're rendering. In this assignment, + // this will be the result of traversing your scene graph. + +uniform mat4 u_ModelInvTr; // The inverse transpose of the model matrix. + // This allows us to transform the object's normals properly + // if the object has been non-uniformly scaled. + +uniform mat4 u_ViewProj; // The matrix that defines the camera's transformation. + // We've written a static matrix for you to use for HW2, + // but in HW3 you'll have to generate one yourself +uniform highp int u_Time; + +in vec4 vs_Pos; // The array of vertex positions passed to the shader + +in vec4 vs_Nor; // The array of vertex normals passed to the shader + +in vec4 vs_Col; // The array of vertex colors passed to the shader. + +out vec4 fs_Nor; // The array of normals that has been transformed by u_ModelInvTr. This is implicitly passed to the fragment shader. +out vec4 fs_LightVec; // The direction in which our virtual light lies, relative to each vertex. 
This is implicitly passed to the fragment shader. +out vec4 fs_Col; // The color of each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_Pos; + +const vec4 lightPos = vec4(5, 5, 3, 1); //The position of our virtual light, which is used to compute the shading of + //the geometry in the fragment shader. + +void main() +{ + fs_Col = vs_Col; // Pass the vertex colors to the fragment shader for interpolation + fs_Pos = vs_Pos; + mat3 invTranspose = mat3(u_ModelInvTr); + fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0); // Pass the vertex normals to the fragment shader for interpolation. + // Transform the geometry's normals by the inverse transpose of the + // model matrix. This is necessary to ensure the normals remain + // perpendicular to the surface after the surface is transformed by + // the model matrix. + //vec4 modelposition = u_Model * vs_Pos; // Temporarily store the transformed vertex positions for use below + vec3 r1 = vec3(cos(-1.0 * vs_Pos.y / 2.0 * cos( 0.02 * float(u_Time))), 0.0, sin(-1.0 * vs_Pos.y / 2.0 * cos( 0.02 * float(u_Time)))); + vec3 r2 = vec3(0., 1., 0.); + vec3 r3 = vec3(-1.0 * sin(-1.0 * vs_Pos.y / 2.0 * cos( 0.02 * float(u_Time))), 0.0, cos(-1.0 * vs_Pos.y / 2.0 * cos( 0.02 * float(u_Time)))); + mat3 rotY = mat3(r1, r2, r3); + vec3 pos = vs_Pos.xyz * rotY; + vec4 modelposition = vec4(pos, 1.0); + + fs_LightVec = lightPos - modelposition; // Compute the direction in which the light source lies + + gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is + // used to render the final positions of the geometry's vertice +} diff --git a/src/shaders/lambert-frag.glsl b/src/shaders/lambert-frag.glsl index 2b8e11b..a1385c8 100644 --- a/src/shaders/lambert-frag.glsl +++ b/src/shaders/lambert-frag.glsl @@ -40,4 +40,4 @@ void main() // Compute final shaded color out_Col = vec4(diffuseColor.rgb * lightIntensity, diffuseColor.a); -} +} \ No newline at end of file diff --git a/src/shaders/lambert-vert.glsl b/src/shaders/lambert-vert.glsl index 7f95a37..3aeffd5 100644 --- a/src/shaders/lambert-vert.glsl +++ b/src/shaders/lambert-vert.glsl @@ -50,4 +50,4 @@ void main() gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is // used to render the final positions of the geometry's vertices -} +} \ No newline at end of file diff --git a/src/shaders/planet-frag.glsl b/src/shaders/planet-frag.glsl new file mode 100644 index 0000000..181d258 --- /dev/null +++ b/src/shaders/planet-frag.glsl @@ -0,0 +1,238 @@ +#version 300 es + +// This is a fragment shader. If you've opened this file first, please +// open and read lambert.vert.glsl before reading on. +// Unlike the vertex shader, the fragment shader actually does compute +// the shading of geometry. For every pixel in your program's output +// screen, the fragment shader is run for every bit of geometry that +// particular pixel overlaps. By implicitly interpolating the position +// data passed into the fragment shader by the vertex shader, the fragment shader +// can compute what color to apply to its pixel based on things like vertex +// position, light position, and vertex color. +precision highp float; + +uniform vec4 u_Color; // The color with which to render this instance of geometry. 
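The bias and gain "toolbox" functions defined a few lines below reshape the fbm elevation before it is thresholded. The classic Perlin bias keeps the logarithm ratio inside the exponent, bias(b, t) = t^(ln b / ln 0.5); the version in this diff computes pow(t, log(b)) / log(0.5) instead, which is a different curve (and can go negative, since log(0.5) < 0). The later thresholds such as n > -0.79 are evidently tuned against that variant, so swapping in the textbook form would change the look. For reference only, a TypeScript sketch of the conventional definitions:

```ts
// Classic Perlin bias/gain remapping for t in [0, 1]. These are the textbook
// forms; the GLSL in this diff uses a different parenthesization, so these are
// shown for comparison rather than as a drop-in replacement.
function bias(b: number, t: number): number {
  return Math.pow(t, Math.log(b) / Math.log(0.5));
}

function gain(g: number, t: number): number {
  return t < 0.5
    ? bias(1 - g, 2 * t) / 2
    : 1 - bias(1 - g, 2 - 2 * t) / 2;
}
```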
+ +uniform highp int u_Time; + +uniform highp int u_Mode; +uniform highp int u_Ambient; +uniform highp int u_Deform; + +// These are the interpolated values out of the rasterizer, so you can't know +// their specific values without knowing the vertices that contributed to them +in vec4 fs_Nor; +in vec4 fs_LightVec; +in vec4 fs_Col; +in vec4 fs_Pos; + +out vec4 out_Col; // This is the final output color that you will see on your + // screen for the pixel that is currently being processed. + +float bias(float b, float t) { + return (pow(t, log(b)) / log(0.5)); +} + +float gain(float g, float t) { + if (t < 0.5f) { + return (bias(1.0 - g, 2.0 * t) / 2.0); + } + else { + return (1.0 - bias(1.0 - g, 2.0 - 2.0 * t) / 2.0); + } +} + +float rand3D(vec3 p) { + return fract(sin(dot(p, vec3(dot(p,vec3(127.1, 311.7, 456.9)), + dot(p,vec3(269.5, 183.3, 236.6)), + dot(p, vec3(420.6, 631.2, 235.1)) + ))) * 438648.5453); +} + +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 456.9)), + dot(p,vec3(269.5, 183.3, 236.6)), + dot(p, vec3(420.6, 631.2, 235.1)) + )) * 438648.5453); +} + +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow5 = vec3(pow(t2.x, 5.0), pow(t2.y, 5.0), pow(t2.z, 5.0)); + vec3 pow4 = vec3(pow(t2.x, 4.0), pow(t2.y, 4.0), pow(t2.z, 4.0)); + vec3 pow3 = vec3(pow(t2.x, 3.0), pow(t2.y, 3.0), pow(t2.z, 3.0)); + vec3 t = vec3(1.f, 1.f, 1.f) - 6.f * pow5 + 15.f * pow4 - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient1 = random3(gridPoint) * 2. - vec3(1., 1., 1.); + vec3 gradient = gradient1; + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + // Get the value of our height field by dotting grid->P with our gradient + float height = dot(diff, gradient); + //Scale our height field (i.e. 
reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for(int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum; +} + +float interpNoise3D(float x, float y, float z) { + int intX = int(floor(x)); + float fractX = fract(x); + int intY = int(floor(y)); + float fractY = fract(y); + int intZ = int(floor(z)); + float fractZ = fract(z); + + float v1 = rand3D(vec3(intX, intY, intZ)); + float v2 = rand3D(vec3(intX + 1, intY, intZ)); + float v3 = rand3D(vec3(intX, intY + 1, intZ)); + float v4 = rand3D(vec3(intX + 1, intY + 1, intZ)); + + float v5 = rand3D(vec3(intX, intY, intZ + 1)); + float v6 = rand3D(vec3(intX + 1, intY, intZ + 1)); + float v7 = rand3D(vec3(intX, intY + 1, intZ + 1)); + float v8 = rand3D(vec3(intX + 1, intY + 1, intZ + 1)); + + float i1 = mix(v1, v2, fractX); + float i2 = mix(v3, v4, fractX); + + float i3 = mix(v5, v6, fractX); + float i4 = mix(v7, v8, fractX); + + float i5 = mix(i1, i2, fractY); + float i6 = mix(i3, i4, fractY); + + float i7 = mix(i5, i6, fractZ); + + return i7; +} + +float fbm(float x, float y, float z) { + float total = 0.0; + float persistence = 0.5; + int octaves = 8; + + for(int i = 1; i <= octaves; i++) { + float freq = pow(2.0, float(i)); + float amp = pow(persistence, float(i)); + + total += interpNoise3D(x * freq, + y * freq, + z * freq) * amp; + } + return total; +} + +float triangle_wave(float x, float freq, float amp) { + float a = abs(x * freq); + float r = a - amp * floor(a/amp); + return (r - (0.5 * amp)); +} + +void main() +{ + // Material base color (before shading) + vec4 diffuseColor = u_Color; + + // Calculate the diffuse term for Lambert shading + float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + // Avoid negative lighting values + diffuseTerm = clamp(diffuseTerm, 0.0, 1.0); + + float ambientTerm = float(u_Ambient) / 10.0; + + float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier + //to simulate ambient lighting. This ensures that faces that are not + + //lit by our point light are not completely black. 
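The biome colors computed next are all driven by the noise defined just above: interpNoise3D trilinearly interpolates hashed lattice values, fbm sums eight octaves with frequency doubling and amplitude halving, and a triangle wave of u_Time scales the ocean's noise input so the waves animate. A compact TypeScript mirror of that value-noise fbm (hash constants copied from the shader; treat it as a sketch, since sin-based hashes do not reproduce exactly across GPUs):

```ts
// Hash a 3D lattice point to [0, 1), mirroring rand3D in the shaders.
function rand3D(x: number, y: number, z: number): number {
  const d1 = x * 127.1 + y * 311.7 + z * 456.9;
  const d2 = x * 269.5 + y * 183.3 + z * 236.6;
  const d3 = x * 420.6 + y * 631.2 + z * 235.1;
  const s = Math.sin(x * d1 + y * d2 + z * d3) * 438648.5453;
  return s - Math.floor(s); // fract()
}

// Trilinear value noise (mirrors interpNoise3D, written as a weighted sum,
// which is equivalent to the shader's nested mix calls).
function interpNoise3D(x: number, y: number, z: number): number {
  const xi = Math.floor(x), yi = Math.floor(y), zi = Math.floor(z);
  const xf = x - xi, yf = y - yi, zf = z - zi;
  let result = 0;
  for (let dz = 0; dz <= 1; dz++) {
    for (let dy = 0; dy <= 1; dy++) {
      for (let dx = 0; dx <= 1; dx++) {
        const w = (dx ? xf : 1 - xf) * (dy ? yf : 1 - yf) * (dz ? zf : 1 - zf);
        result += w * rand3D(xi + dx, yi + dy, zi + dz);
      }
    }
  }
  return result;
}

// fbm: octaves with frequency 2^i and amplitude 0.5^i, matching the shader loop.
function fbm(x: number, y: number, z: number, octaves: number = 8): number {
  let total = 0;
  for (let i = 1; i <= octaves; i++) {
    const freq = Math.pow(2, i);
    const amp = Math.pow(0.5, i);
    total += interpNoise3D(x * freq, y * freq, z * freq) * amp;
  }
  return total;
}
```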
+ //initial colors + vec3 snow = vec3(0.78); + vec3 alpine = vec3(0.64, 0.701, 0.57); + vec3 ocean = vec3(0.4, 0.7, 1.0); + vec3 water = vec3(0.2, 1.0, 1.0); + vec3 sand = vec3(0.96, 0.93, 0.78); + + + //forest color + float p1 = perlinNoise3D(fs_Pos.xyz * 45.0); + float p2 = perlinNoise3D(fs_Pos.xyz * 55.0); + float p3 = perlinNoise3D(fs_Pos.xyz * 25.0); + vec3 layer1 = vec3(p1 * vec3(0.49, 0.90, 0.38)); + vec3 layer2 = vec3(p2 * vec3(0.56, 0.58, 0.19)); + vec3 layer3 = vec3(p3 * alpine.rgb); + vec3 forest = diffuseColor.rgb * (diffuseColor.rgb + 0.5 * layer1 + 0.3 * layer2 + 0.2 * layer3); + + //ocean color + vec3 noiseInput = triangle_wave(float(u_Time), 0.025, 100.0) * 0.15 * fs_Pos.xyz; + float f = fbm(noiseInput.x, noiseInput.y, noiseInput.z); + float f1 = fbm(noiseInput.x + f, noiseInput.y + f, noiseInput.z + f); + ocean = ocean + 6.0 * f1 * ocean + 6.0 * f1 * f1 * f1 * f1 * water; + + //find layers + float n = 1.0 - fbm(fs_Pos.x, fs_Pos.y, fs_Pos.z); + float extrusion = 2.0 * n*n*n; + vec3 oceanLayer = clamp((2.0 * (0.3 - extrusion)), 0.0, 1.0) * ocean; + vec3 forestLayer = 5.0 * clamp(extrusion, 0.0, 1.0) * forest; + vec3 sandLayer = 6.0 * clamp(2.0 * (0.22 - clamp((extrusion - 0.20), 0.0, 1.0)), 0.0, 1.0) * sand; + vec3 alpineLayer = 2.0 * clamp(10.0 * (extrusion- 0.53), 0.0, 1.0) * alpine; + vec3 snowLayer = 1.1 * clamp(10.0 * (extrusion- 0.75), 0.0, 1.0) * snow; + + //combine layers + float r = mix(oceanLayer.r, mix(mix(alpineLayer.r, mix(forestLayer.r, sandLayer.r, 0.5), 0.7), snowLayer.r, 0.5), 0.55); + float g = mix(oceanLayer.g, mix(mix(alpineLayer.g, mix(forestLayer.g, sandLayer.g, 0.5), 0.7), snowLayer.g, 0.5), 0.55); + float b = mix(oceanLayer.b, mix(mix(alpineLayer.b, mix(forestLayer.b, sandLayer.b, 0.5), 0.7), snowLayer.b, 0.5), 0.55); + + vec3 final = vec3(r, g, b); + + //building + float n1 = perlinNoise3D(fs_Pos.xyz * 20.0); + float n2 = perlinNoise3D(fs_Pos.xyz * 30.0); + vec3 cityColor = vec3(1, 1, 0.91); + vec3 layerb2 = 0.5 / n1 * cityColor + 0.5 / n2 * cityColor; + float n3 = rand3D(fs_Pos.xyz * 100.0); + + n = gain(n, 0.45); + if(u_Mode == 0) { + if(n > -0.79 && n < -0.78) { + final = final + 0.5 * cityColor * layerb2 * layerb2 / 100.0 * pow((1.0 - lightIntensity), 6.0); + } + + out_Col = vec4(final.rgb * lightIntensity, diffuseColor.a); + } + + if(u_Mode == 1) { + //gradient + float PI = 3.141592653589793; + r = 0.7 + 0.5 * cos(2.0 * PI * (1.0 * diffuseTerm + 3.0)); + g = 0.3 + 0.7 * cos(2.0 * PI * (1.0 * diffuseTerm + 2.25)); + b = 0.5 + 0.5 * cos(2.0 * PI * (1.0 * diffuseTerm + 2.25)); + out_Col = vec4(r, g, b, 1.0); + + } + + if(u_Mode == 2) { + if(n > -0.79 && n < -0.78) { + final = final + 0.5 * cityColor * layerb2 * layerb2 / 100.0 * pow((1.0 - ambientTerm), 6.0); + } + + out_Col = vec4(final.rgb * ambientTerm, diffuseColor.a); + } + + + + +} diff --git a/src/shaders/planet-vert.glsl b/src/shaders/planet-vert.glsl new file mode 100644 index 0000000..4b3856f --- /dev/null +++ b/src/shaders/planet-vert.glsl @@ -0,0 +1,154 @@ +#version 300 es + +//This is a vertex shader. While it is called a "shader" due to outdated conventions, this file +//is used to apply matrix transformations to the arrays of vertex data passed to it. +//Since this code is run on your GPU, each vertex is transformed simultaneously. +//If it were run on your CPU, each vertex would have to be processed in a FOR loop, one at a time. +//This simultaneous transformation allows your program to run much faster, especially when rendering +//geometry with millions of vertices. 
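Before moving into the displacement shader below, a recap of the layering in planet-frag.glsl above: the elevation term extrusion = 2n^3 is cut into ocean, sand, forest, alpine and snow bands with clamped ramps that are then blended with nested mix calls, and city lights come from a narrow band of the gained noise scaled by pow(1 - lightIntensity, 6) so they only appear on the night side. A simplified TypeScript sketch of just the banding (thresholds copied from the shader; the per-layer scale factors and color mixing are omitted, so this illustrates the logic rather than reproducing the shading):

```ts
// Simplified biome weights derived from the same thresholds planet-frag.glsl
// applies to extrusion = 2 * n^3; higher weight means more of that layer.
function biomeWeights(extrusion: number): {[biome: string]: number} {
  const clamp01 = (v: number) => Math.min(1, Math.max(0, v));
  return {
    ocean:  clamp01(2 * (0.3 - extrusion)),
    sand:   clamp01(2 * (0.22 - clamp01(extrusion - 0.20))),
    forest: clamp01(extrusion),
    alpine: clamp01(10 * (extrusion - 0.53)),
    snow:   clamp01(10 * (extrusion - 0.75)),
  };
}
```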
+ +uniform mat4 u_Model; // The matrix that defines the transformation of the + // object we're rendering. In this assignment, + // this will be the result of traversing your scene graph. + +uniform mat4 u_ModelInvTr; // The inverse transpose of the model matrix. + // This allows us to transform the object's normals properly + // if the object has been non-uniformly scaled. + +uniform mat4 u_ViewProj; // The matrix that defines the camera's transformation. + // We've written a static matrix for you to use for HW2, + // but in HW3 you'll have to generate one yourself +uniform highp int u_Time; +uniform highp int u_Deform; + +in vec4 vs_Pos; // The array of vertex positions passed to the shader + +in vec4 vs_Nor; // The array of vertex normals passed to the shader + +in vec4 vs_Col; // The array of vertex colors passed to the shader. + + +out vec4 fs_Nor; // The array of normals that has been transformed by u_ModelInvTr. This is implicitly passed to the fragment shader. +out vec4 fs_LightVec; // The direction in which our virtual light lies, relative to each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_Col; // The color of each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_Pos; + + +const vec4 lightPos = vec4(5, 5, 3, 1); //The position of our virtual light, which is used to compute the shading of + //the geometry in the fragment shader. + +float bias(float b, float t) { + return (pow(t, log(b)) / log(0.5)); +} + +float gain(float g, float t) { + if (t < 0.5f) { + return (bias(1.0 - g, 2.0 * t) / 2.0); + } + else { + return (1.0 - bias(1.0 - g, 2.0 - 2.0 * t) / 2.0); + } +} +float rand3D(vec3 p) { + return fract(sin(dot(p, vec3(dot(p,vec3(127.1, 311.7, 456.9)), + dot(p,vec3(269.5, 183.3, 236.6)), + dot(p, vec3(420.6, 631.2, 235.1)) + ))) * 438648.5453); +} + +float interpNoise3D(float x, float y, float z) { + int intX = int(floor(x)); + float fractX = fract(x); + int intY = int(floor(y)); + float fractY = fract(y); + int intZ = int(floor(z)); + float fractZ = fract(z); + + float v1 = rand3D(vec3(intX, intY, intZ)); + float v2 = rand3D(vec3(intX + 1, intY, intZ)); + float v3 = rand3D(vec3(intX, intY + 1, intZ)); + float v4 = rand3D(vec3(intX + 1, intY + 1, intZ)); + + float v5 = rand3D(vec3(intX, intY, intZ + 1)); + float v6 = rand3D(vec3(intX + 1, intY, intZ + 1)); + float v7 = rand3D(vec3(intX, intY + 1, intZ + 1)); + float v8 = rand3D(vec3(intX + 1, intY + 1, intZ + 1)); + + float i1 = mix(v1, v2, fractX); + float i2 = mix(v3, v4, fractX); + + float i3 = mix(v5, v6, fractX); + float i4 = mix(v7, v8, fractX); + + float i5 = mix(i1, i2, fractY); + float i6 = mix(i3, i4, fractY); + + float i7 = mix(i5, i6, fractZ); + + return i7; +} + +float fbm(float x, float y, float z) { + float total = 0.0; + float persistence = 0.5; + int octaves = 8; + + for(int i = 1; i <= octaves; i++) { + float freq = pow(2.0, float(i)); + float amp = pow(persistence, float(i)); + + total += interpNoise3D(x * freq, + y * freq, + z * freq) * amp; + } + return total; +} + +vec3 modifyPoint(vec3 p) { + float n = 1.0 - fbm(p.x, p.y, p.z); + n = gain(n, 0.45 + float(u_Deform) * 0.002); + vec3 extrusion = vec3(2.0 * n*n*n); + if (n < -0.8) { + p += 0.55 * extrusion * extrusion * vs_Nor.xyz; + } else { + p += 0.5 * vs_Nor.xyz; + } + return p; +} + +void main() +{ + fs_Col = vs_Col; // Pass the vertex colors to the fragment shader for interpolation + fs_Pos = vs_Pos; + mat3 invTranspose = mat3(u_ModelInvTr); + + + vec4 newPos = vec4(modifyPoint(vs_Pos.xyz), 1.0); + + vec3 
tangent = cross(vec3(0.0, 1.0, 0.0), vs_Nor.xyz);
+    vec3 bitangent = cross(vs_Nor.xyz, tangent);
+    float alpha = 0.001;
+    vec3 p1 = vs_Pos.xyz + alpha * tangent;
+    vec3 p2 = vs_Pos.xyz + alpha * bitangent;
+    vec3 p3 = vs_Pos.xyz - alpha * tangent;
+    vec3 p4 = vs_Pos.xyz - alpha * bitangent;
+
+    p1 = modifyPoint(p1);
+    p2 = modifyPoint(p2);
+    p3 = modifyPoint(p3);
+    p4 = modifyPoint(p4);
+
+    // Estimate the deformed surface normal from the displaced neighbors via
+    // central differences (w = 0 because a normal is a direction, not a point).
+    fs_Nor = vec4(cross(p2 - p4, p1 - p3), 0.0);
+    fs_Nor = vec4(invTranspose * vec3(fs_Nor), 0.0);  // Transform the estimated normal by the inverse transpose of the
+                                                      // model matrix so it stays perpendicular to the surface after the
+                                                      // surface is transformed by the model matrix.
+
+    vec4 modelposition = u_Model * newPos;   // Temporarily store the transformed vertex positions for use below
+
+    fs_LightVec = lightPos - modelposition;  // Compute the direction in which the light source lies
+
+    gl_Position = u_ViewProj * modelposition; // gl_Position is a built-in variable of OpenGL which is
+                                              // used to render the final positions of the geometry's vertices
+}
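The vertex shader above re-estimates the surface normal after displacement: it offsets the vertex a small distance along a tangent and bitangent, pushes those neighbors through the same modifyPoint deformation, and crosses the central differences. The same idea in TypeScript with gl-matrix (the project's math library); `deform` here is a hypothetical stand-in for modifyPoint:

```ts
import {vec3} from 'gl-matrix';

// Re-estimate a surface normal after displacement by central differences,
// mirroring the tangent/bitangent trick in planet-vert.glsl.
function estimateDeformedNormal(
  pos: vec3,
  normal: vec3,
  deform: (p: vec3) => vec3,
  eps: number = 0.001
): vec3 {
  const up = vec3.fromValues(0, 1, 0);
  const tangent = vec3.cross(vec3.create(), up, normal);      // degenerate at the poles
  const bitangent = vec3.cross(vec3.create(), normal, tangent);

  // Displace four neighbors offset along +/- tangent and +/- bitangent.
  const offset = (dir: vec3, s: number) =>
    deform(vec3.scaleAndAdd(vec3.create(), pos, dir, s));
  const p1 = offset(tangent, eps);
  const p2 = offset(bitangent, eps);
  const p3 = offset(tangent, -eps);
  const p4 = offset(bitangent, -eps);

  // Cross the central differences, matching cross(p2 - p4, p1 - p3) above.
  const du = vec3.subtract(vec3.create(), p2, p4);
  const dv = vec3.subtract(vec3.create(), p1, p3);
  return vec3.normalize(vec3.create(), vec3.cross(vec3.create(), du, dv));
}
```

One caveat carried over from the shader: cross((0, 1, 0), normal) degenerates when the normal is parallel to the Y axis, so the sphere's poles may warrant a fallback tangent axis.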