diff --git a/README.md b/README.md
index c636328..915f1c0 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,21 @@

(source: Ken Perlin)

+## Process
+--------------------
+In this project I created a cube object and used a mix of FBM and Perlin noise to color the cube in a fragment shader, and sin functions of the time uniform to modify the shape of the cube in a new vertex shader. I also modified the GUI by adding a color variable that controls the main color used in the noise function.
+
+![](images/cube1.png)
+
+![](images/cube2.png)
+
+![](images/cube3.png)
+
+Here is the link to my project!
+
+https://emmaholthouser16.github.io/hw00-webgl-intro/
+
+
 ## Objective
 - Check that the tools and build configuration we will be using for the class works.
 - Start learning Typescript and WebGL2
diff --git a/images/cube1.png b/images/cube1.png
new file mode 100644
index 0000000..cc6311c
Binary files /dev/null and b/images/cube1.png differ
diff --git a/images/cube2.png b/images/cube2.png
new file mode 100644
index 0000000..2afbb72
Binary files /dev/null and b/images/cube2.png differ
diff --git a/images/cube3.png b/images/cube3.png
new file mode 100644
index 0000000..4829810
Binary files /dev/null and b/images/cube3.png differ
diff --git a/src/geometry/Cube.ts b/src/geometry/Cube.ts
new file mode 100644
index 0000000..516d467
--- /dev/null
+++ b/src/geometry/Cube.ts
@@ -0,0 +1,99 @@
+import {vec3, vec4} from 'gl-matrix';
+import Drawable from '../rendering/gl/Drawable';
+import {gl} from '../globals';
+
+class Cube extends Drawable {
+  indices: Uint32Array;
+  positions: Float32Array;
+  normals: Float32Array;
+  center: vec4;
+
+  constructor(center: vec3) {
+    super(); // Call the constructor of the super class. This is required.
+    this.center = vec4.fromValues(center[0], center[1], center[2], 1);
+  }
+
+  create() {
+
+    // Indices for the individual triangles (two per face)
+    this.indices = new Uint32Array([0, 1, 2,
+                                    0, 2, 3,
+                                    4, 5, 6,
+                                    4, 6, 7,
+                                    8, 9, 10,
+                                    8, 10, 11,
+                                    12, 13, 14,
+                                    12, 14, 15,
+                                    16, 17, 18,
+                                    16, 18, 19,
+                                    20, 21, 22,
+                                    20, 22, 23]);
+    this.normals = new Float32Array([0, 0, 1, 0,
+                                     0, 0, 1, 0,
+                                     0, 0, 1, 0,
+                                     0, 0, 1, 0,
+                                     -1, 0, 0, 0,
+                                     -1, 0, 0, 0,
+                                     -1, 0, 0, 0,
+                                     -1, 0, 0, 0,
+                                     0, 1, 0, 0,
+                                     0, 1, 0, 0,
+                                     0, 1, 0, 0,
+                                     0, 1, 0, 0,
+                                     1, 0, 0, 0,
+                                     1, 0, 0, 0,
+                                     1, 0, 0, 0,
+                                     1, 0, 0, 0,
+                                     0, -1, 0, 0,
+                                     0, -1, 0, 0,
+                                     0, -1, 0, 0,
+                                     0, -1, 0, 0,
+                                     0, 0, -1, 0,
+                                     0, 0, -1, 0,
+                                     0, 0, -1, 0,
+                                     0, 0, -1, 0]);
+    this.positions = new Float32Array([-1, -1, 0, 1,
+                                       1, -1, 0, 1,
+                                       1, 1, 0, 1,
+                                       -1, 1, 0, 1,
+                                       -1, -1, -2, 1, //left side
+                                       -1, 1, -2, 1,
+                                       -1, 1, 0, 1,
+                                       -1, -1, 0, 1,
+                                       -1, 1, -2, 1, //top side
+                                       1, 1, -2, 1,
+                                       1, 1, 0, 1,
+                                       -1, 1, 0, 1, //right side
+                                       1, -1, 0, 1,
+                                       1, -1, -2, 1,
+                                       1, 1, -2, 1,
+                                       1, 1, 0, 1, //bottom side
+                                       -1, -1, -2, 1,
+                                       1, -1, -2, 1,
+                                       1, -1, 0, 1,
+                                       -1, -1, 0, 1, //back side
+                                       1, -1, -2, 1,
+                                       -1, -1, -2, 1,
+                                       -1, 1, -2, 1,
+                                       1, 1, -2, 1]);
+
+    this.generateIdx();
+    this.generatePos();
+    this.generateNor();
+
+    this.count = this.indices.length;
+    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.bufIdx);
+    gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, this.indices, gl.STATIC_DRAW);
+
+    gl.bindBuffer(gl.ARRAY_BUFFER, this.bufNor);
+    gl.bufferData(gl.ARRAY_BUFFER, this.normals, gl.STATIC_DRAW);
+
+    gl.bindBuffer(gl.ARRAY_BUFFER, this.bufPos);
+    gl.bufferData(gl.ARRAY_BUFFER, this.positions, gl.STATIC_DRAW);
+
+    console.log(`Created Cube`);
+  }
+};
+
+export default Cube;
+
diff --git a/src/main.ts b/src/main.ts
index 65a9461..6e2e57f 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -1,8 +1,10 @@
 import {vec3} from 'gl-matrix';
+import {vec4} from 'gl-matrix';
 const Stats = require('stats-js');
 import * as DAT from 'dat.gui';
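One note on the `Cube` class above: the constructor stores `center`, but `create()` builds its positions over a fixed span (x and y in [-1, 1], z in [-2, 0]), so the argument is never applied. A minimal sketch of offsetting the hard-coded positions by the stored center before upload; `applyCenter` is a hypothetical helper, not part of the assignment code:

```typescript
// Hypothetical helper: offset the hard-coded cube positions by a center point.
// Positions are packed as vec4s (x, y, z, w), so step in strides of 4 and leave w alone.
function applyCenter(positions: Float32Array, center: [number, number, number]): Float32Array {
  const out = new Float32Array(positions);
  for (let i = 0; i < out.length; i += 4) {
    out[i + 0] += center[0];
    out[i + 1] += center[1];
    out[i + 2] += center[2];
  }
  return out;
}

// Possible usage inside Cube.create(), before the gl.bufferData call:
//   this.positions = applyCenter(this.positions,
//     [this.center[0], this.center[1], this.center[2]]);
```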
import Icosphere from './geometry/Icosphere'; import Square from './geometry/Square'; +import Cube from './geometry/Cube'; import OpenGLRenderer from './rendering/gl/OpenGLRenderer'; import Camera from './Camera'; import {setGL} from './globals'; @@ -13,20 +15,39 @@ import ShaderProgram, {Shader} from './rendering/gl/ShaderProgram'; const controls = { tesselations: 5, 'Load Scene': loadScene, // A function pointer, essentially + colorR: 1, + colorG: 0, + colorB: 0, + color3: [0, 128, 255, 1], + moonSize: .2, + lightColor: [255, 255, 255, 1], + shadowColor: [255, 255, 255, 1], }; let icosphere: Icosphere; +let moon: Icosphere; let square: Square; -let prevTesselations: number = 5; - +let cube: Cube; +let prevTesselations: number = 6; +let iTime: number = 0.0; +let prevMoon: number = 0.2; +//let lightColo function loadScene() { icosphere = new Icosphere(vec3.fromValues(0, 0, 0), 1, controls.tesselations); icosphere.create(); square = new Square(vec3.fromValues(0, 0, 0)); square.create(); + cube = new Cube(vec3.fromValues(0, 0, 0)); + cube.create(); + moon = new Icosphere(vec3.fromValues(3.5, 0, 0), controls.moonSize, 7.0); + moon.create(); + + } function main() { + + //this.iTime = 0.0; // Initial display for framerate const stats = Stats(); stats.setMode(0); @@ -39,6 +60,10 @@ function main() { const gui = new DAT.GUI(); gui.add(controls, 'tesselations', 0, 8).step(1); gui.add(controls, 'Load Scene'); + gui.addColor(controls, "color3"); + gui.add(controls, 'moonSize'); + gui.addColor(controls, "lightColor") + gui.addColor(controls, "shadowColor") // get canvas and webgl context const canvas = document.getElementById('canvas'); @@ -64,8 +89,25 @@ function main() { new Shader(gl.FRAGMENT_SHADER, require('./shaders/lambert-frag.glsl')), ]); + const cool = new ShaderProgram([ + //new Shader(gl.VERTEX_SHADER, require('./shaders/cool-vert.glsl')), + // new Shader(gl.FRAGMENT_SHADER, require('./shaders/cool-frag.glsl')), + new Shader(gl.VERTEX_SHADER, require('./shaders/cool-vert.glsl')), + new Shader(gl.FRAGMENT_SHADER, require('./shaders/cool-frag.glsl')), + ]); + + const planet = new ShaderProgram([ + new Shader(gl.VERTEX_SHADER, require('./shaders/planet-vert.glsl')), + new Shader(gl.FRAGMENT_SHADER, require('./shaders/cool-frag.glsl')), + ]); + + const moonShader = new ShaderProgram([ + new Shader(gl.VERTEX_SHADER, require('./shaders/moon-vert.glsl')), + new Shader(gl.FRAGMENT_SHADER, require('./shaders/moon-frag.glsl')), + ]) // This function will be called every frame function tick() { + iTime += 1; camera.update(); stats.begin(); gl.viewport(0, 0, window.innerWidth, window.innerHeight); @@ -76,9 +118,23 @@ function main() { icosphere = new Icosphere(vec3.fromValues(0, 0, 0), 1, prevTesselations); icosphere.create(); } - renderer.render(camera, lambert, [ + if(controls.moonSize != prevMoon) + { + prevMoon = controls.moonSize; + moon = new Icosphere(vec3.fromValues(3.5, 0, 0), controls.moonSize, 7.0); + moon.create(); + + } + let R = controls.colorB; + let color = vec4.fromValues((controls.color3[0] / 255), (controls.color3[1] / 255), (controls.color3[2] / 255), 1); + let light = vec4.fromValues((controls.lightColor[0] / 255), (controls.lightColor[1] / 255), (controls.lightColor[2] / 255), 1); + let shadow = vec4.fromValues((controls.shadowColor[0] / 255), (controls.shadowColor[1] / 255), (controls.shadowColor[2] / 255), 1); + renderer.render(iTime, color, camera, planet, light, shadow,[ icosphere, - // square, + + ]); + renderer.render(iTime, color, camera, moonShader, light, shadow,[ + 
    moon,
  ]);
  stats.end();
diff --git a/src/rendering/gl/OpenGLRenderer.ts b/src/rendering/gl/OpenGLRenderer.ts
index 7e527c2..b7ab846 100644
--- a/src/rendering/gl/OpenGLRenderer.ts
+++ b/src/rendering/gl/OpenGLRenderer.ts
@@ -5,8 +5,11 @@ import {gl} from '../../globals';
 import ShaderProgram from './ShaderProgram';
 
 // In this file, `gl` is accessible because it is imported above
+
 class OpenGLRenderer {
+  iTime: number;
   constructor(public canvas: HTMLCanvasElement) {
+    this.iTime = 0;
   }
 
   setClearColor(r: number, g: number, b: number, a: number) {
@@ -22,17 +25,19 @@ class OpenGLRenderer {
     gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
   }
 
-  render(camera: Camera, prog: ShaderProgram, drawables: Array<Drawable>) {
+  render(newTime: number, setColor: vec4, camera: Camera, prog: ShaderProgram, lightCol: vec4, shadowCol: vec4, drawables: Array<Drawable>) {
     let model = mat4.create();
     let viewProj = mat4.create();
-    let color = vec4.fromValues(1, 0, 0, 1);
-
+    let color = vec4.fromValues(1, 1, 0, 1);
     mat4.identity(model);
     mat4.multiply(viewProj, camera.projectionMatrix, camera.viewMatrix);
     prog.setModelMatrix(model);
     prog.setViewProjMatrix(viewProj);
-    prog.setGeometryColor(color);
-
+    prog.setGeometryColor(setColor);
+    prog.setTime(newTime);
+    prog.setLightCol(lightCol);
+    prog.setShadowCol(shadowCol);
+    prog.setCamPos(camera.controls.eye);
     for (let drawable of drawables) {
       prog.draw(drawable);
     }
diff --git a/src/rendering/gl/ShaderProgram.ts b/src/rendering/gl/ShaderProgram.ts
index 67fef40..ccdbb6d 100644
--- a/src/rendering/gl/ShaderProgram.ts
+++ b/src/rendering/gl/ShaderProgram.ts
@@ -1,4 +1,4 @@
-import {vec4, mat4} from 'gl-matrix';
+import {vec4, mat4, vec3} from 'gl-matrix';
 import Drawable from './Drawable';
 import {gl} from '../../globals';
 
@@ -29,6 +29,10 @@ class ShaderProgram {
   unifModelInvTr: WebGLUniformLocation;
   unifViewProj: WebGLUniformLocation;
   unifColor: WebGLUniformLocation;
+  unifLight: WebGLUniformLocation;
+  unifShadow: WebGLUniformLocation;
+  unifTime: WebGLUniformLocation;
+  unifCamPos: WebGLUniformLocation;
 
   constructor(shaders: Array<Shader>) {
     this.prog = gl.createProgram();
@@ -48,6 +52,10 @@ class ShaderProgram {
     this.unifModelInvTr = gl.getUniformLocation(this.prog, "u_ModelInvTr");
     this.unifViewProj = gl.getUniformLocation(this.prog, "u_ViewProj");
     this.unifColor = gl.getUniformLocation(this.prog, "u_Color");
+    this.unifTime = gl.getUniformLocation(this.prog, "u_Time");
+    this.unifCamPos = gl.getUniformLocation(this.prog, "u_CamPos");
+    this.unifLight = gl.getUniformLocation(this.prog, "u_Light");
+    this.unifShadow = gl.getUniformLocation(this.prog, "u_Shadow");
   }
 
   use() {
@@ -85,6 +93,39 @@ class ShaderProgram {
     }
   }
 
+  setTime(t: number) {
+    this.use();
+    if(this.unifTime != -1)
+    {
+      gl.uniform1f(this.unifTime, t);
+    }
+  }
+
+  setCamPos(pos: vec3)
+  {
+    this.use();
+    if(this.unifCamPos != -1)
+    {
+      gl.uniform3fv(this.unifCamPos, pos);
+    }
+  }
+
+  setLightCol(col: vec4)
+  {
+    this.use();
+    if (this.unifLight !== -1) {
+      gl.uniform4fv(this.unifLight, col);
+    }
+  }
+
+  setShadowCol(col: vec4)
+  {
+    this.use();
+    if (this.unifShadow !== -1) {
+      gl.uniform4fv(this.unifShadow, col);
+    }
+  }
+
   draw(d: Drawable) {
     this.use();
diff --git a/src/shaders/cool-frag.glsl b/src/shaders/cool-frag.glsl
new file mode 100644
index 0000000..09a1052
--- /dev/null
+++ b/src/shaders/cool-frag.glsl
@@ -0,0 +1,243 @@
+#version 300 es
+
+// This is a fragment shader. If you've opened this file first, please
+// open and read lambert.vert.glsl before reading on.
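A caveat carried over from the base code's uniform setters above: in WebGL2, `gl.getUniformLocation` returns a `WebGLUniformLocation` object or `null`, never `-1`, so guards like `!= -1` never actually skip a missing uniform. A small sketch of a null-based guard, assuming the same `gl` context object used throughout the renderer:

```typescript
// Sketch: upload a float uniform only when its location is valid.
// getUniformLocation yields null (not -1) when a uniform is absent or optimized out.
function setFloatUniform(
  gl: WebGL2RenderingContext,
  loc: WebGLUniformLocation | null,
  value: number
): void {
  if (loc !== null) {
    gl.uniform1f(loc, value);
  }
}

// e.g. inside ShaderProgram.setTime, after this.use():
//   setFloatUniform(gl, this.unifTime, t);
```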
+// Unlike the vertex shader, the fragment shader actually does compute +// the shading of geometry. For every pixel in your program's output +// screen, the fragment shader is run for every bit of geometry that +// particular pixel overlaps. By implicitly interpolating the position +// data passed into the fragment shader by the vertex shader, the fragment shader +// can compute what color to apply to its pixel based on things like vertex +// position, light position, and vertex color. +precision highp float; + +uniform vec4 u_Color; // The color with which to render this instance of geometry. +uniform float u_Time; +uniform vec4 u_Light; +uniform vec4 u_Shadow; +//uniform vec3 u_CamPos; +// These are the interpolated values out of the rasterizer, so you can't know +// their specific values without knowing the vertices that contributed to them +in vec4 fs_Nor; +in vec4 fs_Pos; +in vec4 fs_LightVec; +in vec4 fs_Col; +in vec4 old; +in vec4 fs_CamPos; +out vec4 out_Col; // This is the final output color that you will see on your + // screen for the pixel that is currently being processed. +float random1( vec3 p ) { + return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453); +} + + +float mySmootherStep(float a, float b, float t) { + t = t*t*t*(t*(t*6.0 - 15.0) + 10.0); + return mix(a, b, t); +} + +float interpNoise3D1(vec3 p) { + vec3 pFract = fract(p); + float llb = random1(floor(p)); + float lrb = random1(floor(p) + vec3(1.0,0.0,0.0)); + float ulb = random1(floor(p) + vec3(0.0,1.0,0.0)); + float urb = random1(floor(p) + vec3(1.0,1.0,0.0)); + + float llf = random1(floor(p) + vec3(0.0,0.0,1.0)); + float lrf = random1(floor(p) + vec3(1.0,0.0,1.0)); + float ulf = random1(floor(p) + vec3(0.0,1.0,1.0)); + float urf = random1(floor(p) + vec3(1.0,1.0,1.0)); + + float lerpXLB = mySmootherStep(llb, lrb, pFract.x); + float lerpXHB = mySmootherStep(ulb, urb, pFract.x); + float lerpXLF = mySmootherStep(llf, lrf, pFract.x); + float lerpXHF = mySmootherStep(ulf, urf, pFract.x); + + float lerpYB = mySmootherStep(lerpXLB, lerpXHB, pFract.y); + float lerpYF = mySmootherStep(lerpXLF, lerpXHF, pFract.y); + + return mySmootherStep(lerpYB, lerpYF, pFract.z); +} + +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 191.999)), + dot(p,vec3(269.5, 183.3, 765.54)), + dot(p, vec3(420.69, 631.2,109.21)))) + *43758.5453); +} + + +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow1 = vec3(pow(t2.x, 5.f), pow(t2.y, 5.f), pow(t2.z, 5.f)); + vec3 pow2 = vec3(pow(t2.x, 4.f), pow(t2.y, 4.f), pow(t2.z, 4.f)); + vec3 pow3 = vec3(pow(t2.x, 3.f), pow(t2.y, 3.f), pow(t2.z, 3.f)); + vec3 t = vec3(1.f) - 6.f * pow1 + + 15.f * pow2 + - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient = random3(gridPoint) * 2.f - vec3(1,1,1); + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + + float height = dot(diff, gradient); + // Scale our height field (i.e. 
reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + + +//PERLIN NOISE + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for (int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum; +} + + +float fbm(vec3 newP, float octaves) { + float amp = 0.5; + float freq = 4.0; + float sum = 0.0; + float maxSum = 0.0; + for(float i = 0.0; i < 10.0; ++i) { + if(i == octaves) + break; + maxSum += amp; + sum += interpNoise3D1(newP * freq) * amp; + amp *= 0.5; + freq *= 2.0; + } + return (sum / maxSum); +} +vec3 palette(float t, vec3 a, vec3 b, vec3 c, vec3 d ) +{ + return a + b * cos( 6.28318*(c*t+d) ); +} + + float remapTo01(float min, float max, float t) + { + float difference = max - min; + float scaleFactor = 1.0 / difference; + t *= scaleFactor; + t -= (min * scaleFactor); + return t; + } + +void main() +{ + //color options + vec4 blue1 = vec4(-0.171, 0.688, 0.868, 1.f); + vec4 blue2 = vec4(-0.072, .1584, -0.132, 1.f); + vec4 blue3 = vec4(0.748, 1.508, 1.578, 1.f); + vec4 blue4 = vec4(-0.322, 0.498, 0.198, 1.f); + float fbmBlue = perlinNoise3D(fs_Pos.xyz * 1.3) * cos(u_Time * .005); + vec3 oceanCol = palette(fbmBlue, blue1.xyz, blue2.xyz, blue3.xyz, blue4.xyz); + + vec4 green1 = vec4(0.158, 0.508, -0.332, 1.f); + vec4 green2 = vec4(0.478, 0.188, 0.638, 1.f); + vec4 green3 = vec4(1.308, 0.948, 1.000, 1.f); + vec4 green4 = vec4(-0.692, 0.328, 1.178, 1.f); + + float greenFBM = fbm(fs_Pos.xyz * .4, 3.0); + float g = clamp(length(fs_Pos.xyz) / 1.2, 0.0, 1.0); + + vec3 grassCol = palette(greenFBM, green1.xyz, green2.xyz, green3.xyz, green4.xyz); + // Material base color (before shading) + vec4 greenColor = vec4(grassCol, 1.f); + //= vec4(104.f / 255.f, 163.f /255.f, 59.f / 255.f, 1.0); + vec4 blueColor = vec4(oceanCol, 1.f); + vec4 greyMt = vec4(.3, .3, .3, 1.f); + vec4 diffuseColor = blueColor; + + + + float diffuseTerm = 0.f; + vec4 av = normalize(fs_LightVec) + normalize(fs_CamPos); + vec4 avg = av / 2.0; + float specularIntensity = 0.f; + + + vec4 cam = (fs_CamPos - fs_Pos); + diffuseTerm = dot(normalize(fs_Nor), normalize(cam)); + diffuseTerm += dot(normalize(fs_Nor), normalize(fs_LightVec)); + + if (g > 0.98) + { + diffuseColor = vec4(1.f, 1.f, 1.f, 1.f); + vec4 cam = (fs_CamPos - fs_Pos); + diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + + specularIntensity = max(pow(dot(normalize(avg), normalize(fs_Nor)), 8.f), 0.f); + + } + + else if (g > 0.92) + { + + diffuseColor = greyMt; + diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + specularIntensity = 0.f; + } + + else if (g >= 0.89) + { + float newG = remapTo01(.89, .92, g); + + vec4 green = vec4(0.0, 1.0, 0.0, 1.0); + vec4 white = vec4(1.0); + diffuseColor = mix(greyMt, greenColor, 1.0-newG); + + diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + specularIntensity = 0.f; + } + else if(g > .855) + { + diffuseColor = greenColor; + + diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + specularIntensity = 0.f; + } + else if (g > 0.845) + { + diffuseColor = vec4(207.f / 255.f, 182.f / 255.f, 70.f / 255.f, 1.f); + diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + specularIntensity = 0.f; + } + + + + // + if(diffuseTerm < 0.f) + { + diffuseColor = diffuseColor * u_Shadow; + } + else + { + diffuseColor = diffuseColor * u_Light; + } + + vec3 diffuse3 = 
vec3(fs_Pos.x, fs_Pos.y, fs_Pos.z); + + // Avoid negative lighting values + // diffuseTerm = clamp(diffuseTerm, 0, 1); + + float ambientTerm = 0.2; + diffuseTerm = clamp(diffuseTerm, 0.f, 1.f); + float lightIntensity = diffuseTerm + ambientTerm + specularIntensity; //Add a small float value to the color multiplier + //to simulate ambient lighting. This ensures that faces that are not + //lit by our point light are not completely black. + + // Compute final shaded color + out_Col = vec4(diffuseColor.rgb * lightIntensity, diffuseColor.a); +} diff --git a/src/shaders/cool-vert.glsl b/src/shaders/cool-vert.glsl new file mode 100644 index 0000000..d41a860 --- /dev/null +++ b/src/shaders/cool-vert.glsl @@ -0,0 +1,65 @@ +#version 300 es + +//This is a vertex shader. While it is called a "shader" due to outdated conventions, this file +//is used to apply matrix transformations to the arrays of vertex data passed to it. +//Since this code is run on your GPU, each vertex is transformed simultaneously. +//If it were run on your CPU, each vertex would have to be processed in a FOR loop, one at a time. +//This simultaneous transformation allows your program to run much faster, especially when rendering +//geometry with millions of vertices. + +uniform mat4 u_Model; // The matrix that defines the transformation of the + // object we're rendering. In this assignment, + // this will be the result of traversing your scene graph. + +uniform mat4 u_ModelInvTr; // The inverse transpose of the model matrix. + // This allows us to transform the object's normals properly + // if the object has been non-uniformly scaled. + +uniform mat4 u_ViewProj; // The matrix that defines the camera's transformation. + // We've written a static matrix for you to use for HW2, +uniform float u_Time; // but in HW3 you'll have to generate one yourself + +in vec4 vs_Pos; // The array of vertex positions passed to the shader + +in vec4 vs_Nor; // The array of vertex normals passed to the shader + +in vec4 vs_Col; // The array of vertex colors passed to the shader. +out vec4 old; +out vec4 fs_Nor; // The array of normals that has been transformed by u_ModelInvTr. This is implicitly passed to the fragment shader. +out vec4 fs_LightVec; // The direction in which our virtual light lies, relative to each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_Col; +out vec4 fs_Pos; // The color of each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_CamPos; +const vec4 lightPos = vec4(5, 5, 3, 1); //The position of our virtual light, which is used to compute the shading of + //the geometry in the fragment shader. + +void main() +{ + fs_Col = vs_Col; // Pass the vertex colors to the fragment shader for interpolation + fs_Pos = vs_Pos; + mat3 invTranspose = mat3(u_ModelInvTr); + fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0); // Pass the vertex normals to the fragment shader for interpolation. + float t = u_Time * 0.004; // Transform the geometry's normals by the inverse transpose of the + t = sin(t); + //cubic interpolation + t = t * t * (3.0 - 2.0 * t); // model matrix. This is necessary to ensure the normals remain + + + // perpendicular to the surface after the surface is transformed by + // the model matrix. 
+ + + vec4 modelposition = u_Model * vs_Pos; // Temporarily store the transformed vertex positions for use below + vec4 camera = vec4(0.f, 0.f, 0.f, 1.f); + vec4 modelposition2 = vec4(.5, 0, 0, 0); + // modelposition2.xyz = modelposition.xyz + (sin(modelposition.xyz) * 1.05) + (sin(modelposition.xyz) * .25); + modelposition2.yz = modelposition.yz + (cos(modelposition.yz) * .35); + //modelposition2 + modelposition2.y = modelposition2.y + (sin(modelposition2.y) * .20); + modelposition.xyz = mix(modelposition.xyz, modelposition2.xyz, t); + gl_Position = u_ViewProj * modelposition; + fs_LightVec = lightPos - modelposition; // Compute the direction in which the light source lies + + //gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is + // used to render the final positions of the geometry's vertices +} diff --git a/src/shaders/moon-frag.glsl b/src/shaders/moon-frag.glsl new file mode 100644 index 0000000..c84e83b --- /dev/null +++ b/src/shaders/moon-frag.glsl @@ -0,0 +1,173 @@ +#version 300 es + +// This is a fragment shader. If you've opened this file first, please +// open and read lambert.vert.glsl before reading on. +// Unlike the vertex shader, the fragment shader actually does compute +// the shading of geometry. For every pixel in your program's output +// screen, the fragment shader is run for every bit of geometry that +// particular pixel overlaps. By implicitly interpolating the position +// data passed into the fragment shader by the vertex shader, the fragment shader +// can compute what color to apply to its pixel based on things like vertex +// position, light position, and vertex color. +precision highp float; + +uniform vec4 u_Color; // The color with which to render this instance of geometry. +uniform float u_Time; +uniform vec4 u_Light; +uniform vec4 u_Shadow; +// These are the interpolated values out of the rasterizer, so you can't know +// their specific values without knowing the vertices that contributed to them +in vec4 fs_Nor; +in vec4 fs_Pos; +in vec4 fs_LightVec; +in vec4 fs_Col; + +out vec4 out_Col; // This is the final output color that you will see on your + // screen for the pixel that is currently being processed. 
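Both `cool-frag.glsl` above and the moon shader below push their noise values through `palette`, the cosine gradient a + b * cos(2π(c * t + d)). A CPU-side TypeScript port, useful only for previewing parameter sets; `Vec3` here is a plain tuple, not the gl-matrix type:

```typescript
type Vec3 = [number, number, number];

// CPU port of the GLSL cosine palette: a + b * cos(2π(c·t + d)) per channel.
function palette(t: number, a: Vec3, b: Vec3, c: Vec3, d: Vec3): Vec3 {
  const channel = (i: number) => a[i] + b[i] * Math.cos(2 * Math.PI * (c[i] * t + d[i]));
  return [channel(0), channel(1), channel(2)];
}

// Example: sample the "ocean" parameter set from cool-frag.glsl at a few t values.
const blue1: Vec3 = [-0.171, 0.688, 0.868];
const blue2: Vec3 = [-0.072, 0.1584, -0.132];
const blue3: Vec3 = [0.748, 1.508, 1.578];
const blue4: Vec3 = [-0.322, 0.498, 0.198];
for (const t of [0, 0.25, 0.5, 0.75, 1]) {
  console.log(t, palette(t, blue1, blue2, blue3, blue4));
}
```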
+float random1( vec3 p ) { + return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453); +} + + +float mySmootherStep(float a, float b, float t) { + t = t*t*t*(t*(t*6.0 - 15.0) + 10.0); + return mix(a, b, t); +} + +float interpNoise3D1(vec3 p) { + vec3 pFract = fract(p); + float llb = random1(floor(p)); + float lrb = random1(floor(p) + vec3(1.0,0.0,0.0)); + float ulb = random1(floor(p) + vec3(0.0,1.0,0.0)); + float urb = random1(floor(p) + vec3(1.0,1.0,0.0)); + + float llf = random1(floor(p) + vec3(0.0,0.0,1.0)); + float lrf = random1(floor(p) + vec3(1.0,0.0,1.0)); + float ulf = random1(floor(p) + vec3(0.0,1.0,1.0)); + float urf = random1(floor(p) + vec3(1.0,1.0,1.0)); + + float lerpXLB = mySmootherStep(llb, lrb, pFract.x); + float lerpXHB = mySmootherStep(ulb, urb, pFract.x); + float lerpXLF = mySmootherStep(llf, lrf, pFract.x); + float lerpXHF = mySmootherStep(ulf, urf, pFract.x); + + float lerpYB = mySmootherStep(lerpXLB, lerpXHB, pFract.y); + float lerpYF = mySmootherStep(lerpXLF, lerpXHF, pFract.y); + + return mySmootherStep(lerpYB, lerpYF, pFract.z); +} + +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 191.999)), + dot(p,vec3(269.5, 183.3, 765.54)), + dot(p, vec3(420.69, 631.2,109.21)))) + *43758.5453); +} + + +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow1 = vec3(pow(t2.x, 5.f), pow(t2.y, 5.f), pow(t2.z, 5.f)); + vec3 pow2 = vec3(pow(t2.x, 4.f), pow(t2.y, 4.f), pow(t2.z, 4.f)); + vec3 pow3 = vec3(pow(t2.x, 3.f), pow(t2.y, 3.f), pow(t2.z, 3.f)); + vec3 t = vec3(1.f) - 6.f * pow1 + + 15.f * pow2 + - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient = random3(gridPoint) * 2.f - vec3(1,1,1); + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + + float height = dot(diff, gradient); + // Scale our height field (i.e. 
reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + + +//PERLIN NOISE + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for (int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum; + //* sin(u_Time * 0.04); +} + + +float fbm(vec3 newP, float octaves) { + float amp = 0.5; + float freq = 4.0; + float sum = 0.0; + float maxSum = 0.0; + for(float i = 0.0; i < 10.0; ++i) { + if(i == octaves) + break; + maxSum += amp; + sum += interpNoise3D1(newP * freq) * amp; + amp *= 0.5; + freq *= 2.0; + } + return (sum / maxSum); +} +vec3 palette(float t, vec3 a, vec3 b, vec3 c, vec3 d ) +{ + return a + b * cos( 6.28318*(c*t+d) ); +} + + float remapTo01(float min, float max, float t) + { + float difference = max - min; + float scaleFactor = 1.0 / difference; + t *= scaleFactor; + t -= (min * scaleFactor); + return t; + } +void main() +{ + //color options + vec3 purple1 = vec3(0.308, 0.390, 0.390); + vec3 purple2 = vec3(0.588, 0.398, 0.688); + vec3 purple3 = vec3(0.458, 0.948, 0.538); + vec3 purple4 = vec3(-1.332, 0.333, 0.667); + + vec3 pos = vec3(fs_Pos); + pos.x = remapTo01(0.f, .2, pos.x); + pos.y = remapTo01(0.f, .2, pos.y); + pos.z = remapTo01(0.f, .2, pos.z); + + float G = perlinNoise3D(pos * 1.1); + //float G = fbm(pos * .85, 2.0); + G = G / 2.0 + 1.0; + vec4 col = vec4(palette(G, purple1, purple2, purple3, purple4), 1.f); + + + // Calculate the diffuse term for Lambert shading + //vec3 newColor = mix(newColorA, newColorB, .8); + float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + if(diffuseTerm < 0.f) + { + col = col * u_Shadow; + } + else + { + col = col * u_Light; + } + float ambientTerm = 0.2; + diffuseTerm = clamp(diffuseTerm, 0.f, 1.f); + float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier + //to simulate ambient lighting. This ensures that faces that are not + //lit by our point light are not completely black. + + // Compute final shaded color + out_Col = vec4(col.rgb * lightIntensity, 1.f); +} diff --git a/src/shaders/moon-vert.glsl b/src/shaders/moon-vert.glsl new file mode 100644 index 0000000..9bc2dc0 --- /dev/null +++ b/src/shaders/moon-vert.glsl @@ -0,0 +1,261 @@ +#version 300 es + +//This is a vertex shader. While it is called a "shader" due to outdated conventions, this file +//is used to apply matrix transformations to the arrays of vertex data passed to it. +//Since this code is run on your GPU, each vertex is transformed simultaneously. +//If it were run on your CPU, each vertex would have to be processed in a FOR loop, one at a time. +//This simultaneous transformation allows your program to run much faster, especially when rendering +//geometry with millions of vertices. + +uniform mat4 u_Model; // The matrix that defines the transformation of the + // object we're rendering. In this assignment, + // this will be the result of traversing your scene graph. + +uniform mat4 u_ModelInvTr; // The inverse transpose of the model matrix. + // This allows us to transform the object's normals properly + // if the object has been non-uniformly scaled. + +uniform mat4 u_ViewProj; // The matrix that defines the camera's transformation. 
+ // We've written a static matrix for you to use for HW2, +uniform float u_Time; // but in HW3 you'll have to generate one yourself +uniform vec3 u_CamPos; +in vec4 vs_Pos; // The array of vertex positions passed to the shader + +in vec4 vs_Nor; // The array of vertex normals passed to the shader + +in vec4 vs_Col; // The array of vertex colors passed to the shader. +out vec4 old; +out vec4 fs_Nor; // The array of normals that has been transformed by u_ModelInvTr. This is implicitly passed to the fragment shader. +out vec4 fs_LightVec; // The direction in which our virtual light lies, relative to each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_Col; +out vec4 fs_Pos; // The color of each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_CamPos; +const vec4 lightPos = vec4(5, 5, 3, 1); //The position of our virtual light, which is used to compute the shading of + //the geometry in the fragment shader. + + +float random1( vec3 p ) { + return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453); +} + +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 191.999)), + dot(p,vec3(269.5, 183.3, 765.54)), + dot(p, vec3(420.69, 631.2,109.21)))) + *43758.5453); +} + +float mySmootherStep(float a, float b, float t) { + t = t*t*t*(t*(t*6.0 - 15.0) + 10.0); + return mix(a, b, t); +} + +float interpNoise3D1(vec3 p) { + vec3 pFract = fract(p); + float llb = random1(floor(p)); + float lrb = random1(floor(p) + vec3(1.0,0.0,0.0)); + float ulb = random1(floor(p) + vec3(0.0,1.0,0.0)); + float urb = random1(floor(p) + vec3(1.0,1.0,0.0)); + + float llf = random1(floor(p) + vec3(0.0,0.0,1.0)); + float lrf = random1(floor(p) + vec3(1.0,0.0,1.0)); + float ulf = random1(floor(p) + vec3(0.0,1.0,1.0)); + float urf = random1(floor(p) + vec3(1.0,1.0,1.0)); + + float lerpXLB = mySmootherStep(llb, lrb, pFract.x); + float lerpXHB = mySmootherStep(ulb, urb, pFract.x); + float lerpXLF = mySmootherStep(llf, lrf, pFract.x); + float lerpXHF = mySmootherStep(ulf, urf, pFract.x); + + float lerpYB = mySmootherStep(lerpXLB, lerpXHB, pFract.y); + float lerpYF = mySmootherStep(lerpXLF, lerpXHF, pFract.y); + + return mySmootherStep(lerpYB, lerpYF, pFract.z); +} +float fbm(vec3 newP, float octaves) { + float amp = 0.5; + float freq = 6.0; + float sum = 0.0; + float maxSum = 0.0; + for(float i = 0.0; i < 10.0; ++i) { + if(i == octaves) + break; + maxSum += amp; + sum += interpNoise3D1(newP * freq) * amp; + amp *= 0.5; + freq *= 2.0; + } + return (sum / maxSum); +} + +//worley noise +float WorleyNoise(vec3 pos) +{ + pos *= 3.0; + vec3 uvInt = floor(pos); + vec3 uvFract = fract(pos); + float minDist = 1.0; + vec3 closeOne; + for(int z = -1; z <= 1; z++) + { + for(int y = -1; y <= 1; ++y) + { + for(int x = -1; x <= 1; ++x) + { + vec3 neighbor = vec3(float(x), float(y), float(z)); + vec3 point = random3(uvInt + neighbor); + vec3 diff = neighbor + point - uvFract; + float dist = length(diff); + //finding the point that is the closest random point + if(dist < minDist) + { + //getting the point into the correct uv coordinate space + minDist = dist; + //closeOne = (uvInt + neighbor + point) / 8.0; + } + + + } + } + } + return minDist; + // return clamp(minDist, .1f, 1.f); + //return vec3(0.0, 0.f, 0.f); +} + +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow1 = vec3(pow(t2.x, 5.f), pow(t2.y, 5.f), 
pow(t2.z, 5.f)); + vec3 pow2 = vec3(pow(t2.x, 4.f), pow(t2.y, 4.f), pow(t2.z, 4.f)); + vec3 pow3 = vec3(pow(t2.x, 3.f), pow(t2.y, 3.f), pow(t2.z, 3.f)); + vec3 t = vec3(1.f) - 6.f * pow1 + + 15.f * pow2 + - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient = random3(gridPoint) * 2.f - vec3(1,1,1); + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + + float height = dot(diff, gradient); + // Scale our height field (i.e. reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + +//float bias() +//PERLIN NOISE + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for (int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum; +} + + + + float remapTo01(float min, float max, float t) + { + float difference = max - min; + float scaleFactor = 1.0 / difference; + t *= scaleFactor; + t -= (min * scaleFactor); + return t; + } + + +float getHeight(vec3 pos) +{ + + pos.x = remapTo01(0.f, .2, pos.x); + pos.y = remapTo01(0.f, .2, pos.y); + pos.z = remapTo01(0.f, .2, pos.z); + float height = fbm(pos * .5, 3.0); + //height = pow(height, 1.2); + height -= .1; + + height = clamp(height, .19f, .22f); + // height = height / .2; + return height; +} + +vec3 getNormal(float total) +{ + vec3 tangent = cross(vec3(0.f, 1.f, 0.f), fs_Nor.xyz); + vec3 bitangent = cross(fs_Nor.xyz, tangent); + + vec3 newPt1 = vs_Pos.xyz + tangent * .01; + vec3 newPt2 = vs_Pos.xyz + bitangent * .01; + + float a = total; + float b = getHeight(newPt1); + float c = getHeight(newPt2); + + vec3 aPt = vs_Pos.xyz + fs_Nor.xyz * a; + vec3 bPt = newPt1 + fs_Nor.xyz * b; + vec3 cPt = newPt2 + fs_Nor.xyz * c; + + vec3 final = cross(normalize(aPt - bPt), normalize(aPt - cPt)); + return final; + +} +void main() +{ + + + vec4 column1 = vec4(1.8, 0.f, 0.f, 0.f); + vec4 column2 = vec4(0.f, .8, 0.f, 0.f); + vec4 column3 = vec4(0.f, 0.f, .8, 0.f); + vec4 column4 = vec4(0.f, 0.f, 0.f, 1.f); + mat4 scaleMat; + scaleMat[0] = column1; + scaleMat[1] = column2; + scaleMat[2] = column3; + scaleMat[3] = column4; + fs_Col = vs_Col; // Pass the vertex colors to the fragment shader for interpolation + fs_Pos = vs_Pos; + old = vs_Pos; + mat3 invTranspose = mat3(u_ModelInvTr); + + fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0); // Pass the vertex normals to the fragment shader for interpolation. + // model matrix. This is necessary to ensure the normals remain + + float total = getHeight(old.xyz); + fs_Pos += fs_Nor * total; + + + fs_Nor = vec4(getNormal(total), 0.f); + //fs_Pos += fs_Nor * vec4(w, w, w, 1); + // fs_Nor.x = WorleyNoise(fs_Pos.xyz + vec3(.0001, 0.f, 0.f)) - WorleyNoise(fs_Pos.xyz - vec3(.0001, 0.0, 0.0)); + // fs_Nor.y = WorleyNoise(fs_Pos.xyz + vec3(0.f, .0001, 0.f)) - WorleyNoise(fs_Pos.xyz - vec3(0.0, .0001, 0.0)); + // fs_Nor.z = WorleyNoise(fs_Pos.xyz + vec3(0.f, 0.f, .0001)) - WorleyNoise(fs_Pos.xyz - vec3(0.0, 0.0, .0001)); + + //vs_Pos.x = vs_Pos.x * p; + + // perpendicular to the surface after the surface is transformed by + // the model matrix. 
+ + //glRotatef() + vec4 modelposition = u_Model * (fs_Pos); + //* cos(u_Time); // Temporarily store the transformed vertex positions for use below + + vec4 modelposition2 = vec4(.5, 0, 0, 0); + // modelposition2.xyz = modelposition.xyz + (sin(modelposition.xyz) * 1.05) + (sin(modelposition.xyz) * .25); + //modelposition2.yz = modelposition.yz + (cos(modelposition.yz) * .35); + //modelposition2 + //modelposition2.y = modelposition2.y + (sin(modelposition2.y) * .20); + //modelposition.xyz = mix(modelposition.xyz, modelposition2.xyz, t); + gl_Position = u_ViewProj * modelposition; + fs_LightVec = lightPos - modelposition; // Compute the direction in which the light source lies + fs_CamPos = vec4(u_CamPos, 0.f); + //gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is + // used to render the final positions of the geometry's vertices +} diff --git a/src/shaders/planet-frag.glsl b/src/shaders/planet-frag.glsl new file mode 100644 index 0000000..6ef1025 --- /dev/null +++ b/src/shaders/planet-frag.glsl @@ -0,0 +1,158 @@ +#version 300 es + +// This is a fragment shader. If you've opened this file first, please +// open and read lambert.vert.glsl before reading on. +// Unlike the vertex shader, the fragment shader actually does compute +// the shading of geometry. For every pixel in your program's output +// screen, the fragment shader is run for every bit of geometry that +// particular pixel overlaps. By implicitly interpolating the position +// data passed into the fragment shader by the vertex shader, the fragment shader +// can compute what color to apply to its pixel based on things like vertex +// position, light position, and vertex color. +precision highp float; + +uniform vec4 u_Color; // The color with which to render this instance of geometry. +uniform float u_Time; +// These are the interpolated values out of the rasterizer, so you can't know +// their specific values without knowing the vertices that contributed to them +in vec4 fs_Nor; +in vec4 fs_Pos; +in vec4 fs_LightVec; +in vec4 fs_Col; + +out vec4 out_Col; // This is the final output color that you will see on your + // screen for the pixel that is currently being processed. 
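`getNormal` in `moon-vert.glsl` above (and again in `planet-vert.glsl` below) rebuilds the shading normal after displacement: it nudges the vertex along a tangent and bitangent, re-evaluates the height at those neighbors, and crosses the two resulting edge vectors. A CPU-side sketch of the same idea with gl-matrix; `heightAt` stands in for the shader's `getHeight`, and the fixed (0, 1, 0) up vector has the same degenerate case at the poles as the shader version:

```typescript
import { vec3 } from 'gl-matrix';

// Recompute a surface normal after height displacement: displace the point and
// two nearby points along the original normal by their heights, then cross the
// two edge vectors of that small triangle.
function displacedNormal(
  pos: vec3,
  nor: vec3,
  heightAt: (p: vec3) => number,
  eps: number = 0.01
): vec3 {
  const tangent = vec3.cross(vec3.create(), vec3.fromValues(0, 1, 0), nor);
  const bitangent = vec3.cross(vec3.create(), nor, tangent);

  const p1 = vec3.scaleAndAdd(vec3.create(), pos, tangent, eps);
  const p2 = vec3.scaleAndAdd(vec3.create(), pos, bitangent, eps);

  const a = vec3.scaleAndAdd(vec3.create(), pos, nor, heightAt(pos));
  const b = vec3.scaleAndAdd(vec3.create(), p1, nor, heightAt(p1));
  const c = vec3.scaleAndAdd(vec3.create(), p2, nor, heightAt(p2));

  const ab = vec3.sub(vec3.create(), a, b);
  const ac = vec3.sub(vec3.create(), a, c);
  return vec3.normalize(vec3.create(), vec3.cross(vec3.create(), ab, ac));
}
```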
+float random1( vec3 p ) { + return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453); +} + + +float mySmootherStep(float a, float b, float t) { + t = t*t*t*(t*(t*6.0 - 15.0) + 10.0); + return mix(a, b, t); +} + +float interpNoise3D1(vec3 p) { + vec3 pFract = fract(p); + float llb = random1(floor(p)); + float lrb = random1(floor(p) + vec3(1.0,0.0,0.0)); + float ulb = random1(floor(p) + vec3(0.0,1.0,0.0)); + float urb = random1(floor(p) + vec3(1.0,1.0,0.0)); + + float llf = random1(floor(p) + vec3(0.0,0.0,1.0)); + float lrf = random1(floor(p) + vec3(1.0,0.0,1.0)); + float ulf = random1(floor(p) + vec3(0.0,1.0,1.0)); + float urf = random1(floor(p) + vec3(1.0,1.0,1.0)); + + float lerpXLB = mySmootherStep(llb, lrb, pFract.x); + float lerpXHB = mySmootherStep(ulb, urb, pFract.x); + float lerpXLF = mySmootherStep(llf, lrf, pFract.x); + float lerpXHF = mySmootherStep(ulf, urf, pFract.x); + + float lerpYB = mySmootherStep(lerpXLB, lerpXHB, pFract.y); + float lerpYF = mySmootherStep(lerpXLF, lerpXHF, pFract.y); + + return mySmootherStep(lerpYB, lerpYF, pFract.z); +} + +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 191.999)), + dot(p,vec3(269.5, 183.3, 765.54)), + dot(p, vec3(420.69, 631.2,109.21)))) + *43758.5453); +} + + +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow1 = vec3(pow(t2.x, 5.f), pow(t2.y, 5.f), pow(t2.z, 5.f)); + vec3 pow2 = vec3(pow(t2.x, 4.f), pow(t2.y, 4.f), pow(t2.z, 4.f)); + vec3 pow3 = vec3(pow(t2.x, 3.f), pow(t2.y, 3.f), pow(t2.z, 3.f)); + vec3 t = vec3(1.f) - 6.f * pow1 + + 15.f * pow2 + - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient = random3(gridPoint) * 2.f - vec3(1,1,1); + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + + float height = dot(diff, gradient); + // Scale our height field (i.e. 
reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + + +//PERLIN NOISE + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for (int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum * sin(u_Time * 0.04); +} + + +float fbm(vec3 newP, float octaves) { + float amp = 0.5; + float freq = 4.0; + float sum = 0.0; + float maxSum = 0.0; + for(float i = 0.0; i < 10.0; ++i) { + if(i == octaves) + break; + maxSum += amp; + sum += interpNoise3D1(newP * freq) * amp; + amp *= 0.5; + freq *= 2.0; + } + return (sum / maxSum); +} +vec3 palette(float t, vec3 a, vec3 b, vec3 c, vec3 d ) +{ + return a + b * cos( 6.28318*(c*t+d) ); +} +void main() +{ + //color options + vec3 a = vec3(0.5, 0.5, 0.5); + vec3 b = vec3(0.5, 0.5, 0.5); + vec3 newC = vec3(1.0, 1.0, 1.0); + vec3 d = vec3(0.00, 0.33, 0.67); + vec3 sA = vec3(0.5, 0.5, 0.5); + vec3 sB = vec3(0.5, 0.5, 0.5); + vec3 sC = vec3(2.0, 1.0, 0.0); + vec3 sD = vec3(0.50, 0.20, 0.25); + // Material base color (before shading) + vec4 diffuseColor = u_Color; + vec3 diffuse3 = vec3(fs_Pos.x, fs_Pos.y, fs_Pos.z); + float p = perlinNoise3D(diffuse3); + float fb = fbm(diffuse3, 2.0); + float f = sin(u_Time); + vec3 testColor = vec3(f, f, f); + float G = fbm(diffuse3, 4.0); + vec3 newColorA = palette(fb, diffuseColor.rgb, b, newC, d); + vec3 newColorB = palette(p, diffuseColor.rgb, b, newC, d); + // Calculate the diffuse term for Lambert shading + vec3 newColor = mix(newColorA, newColorB, .8); + float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec)); + // Avoid negative lighting values + // diffuseTerm = clamp(diffuseTerm, 0, 1); + + float ambientTerm = 0.2; + diffuseTerm = clamp(diffuseTerm, 0.f, 1.f); + float lightIntensity = diffuseTerm + ambientTerm; //Add a small float value to the color multiplier + //to simulate ambient lighting. This ensures that faces that are not + //lit by our point light are not completely black. + + // Compute final shaded color + out_Col = vec4(newColor.rgb * lightIntensity, diffuseColor.a); +} diff --git a/src/shaders/planet-vert.glsl b/src/shaders/planet-vert.glsl new file mode 100644 index 0000000..d63d5a7 --- /dev/null +++ b/src/shaders/planet-vert.glsl @@ -0,0 +1,321 @@ +#version 300 es + +//This is a vertex shader. While it is called a "shader" due to outdated conventions, this file +//is used to apply matrix transformations to the arrays of vertex data passed to it. +//Since this code is run on your GPU, each vertex is transformed simultaneously. +//If it were run on your CPU, each vertex would have to be processed in a FOR loop, one at a time. +//This simultaneous transformation allows your program to run much faster, especially when rendering +//geometry with millions of vertices. + +uniform mat4 u_Model; // The matrix that defines the transformation of the + // object we're rendering. In this assignment, + // this will be the result of traversing your scene graph. + +uniform mat4 u_ModelInvTr; // The inverse transpose of the model matrix. + // This allows us to transform the object's normals properly + // if the object has been non-uniformly scaled. + +uniform mat4 u_ViewProj; // The matrix that defines the camera's transformation. 
+ // We've written a static matrix for you to use for HW2, +uniform float u_Time; // but in HW3 you'll have to generate one yourself +uniform vec3 u_CamPos; +in vec4 vs_Pos; // The array of vertex positions passed to the shader + +in vec4 vs_Nor; // The array of vertex normals passed to the shader + +in vec4 vs_Col; // The array of vertex colors passed to the shader. +out vec4 old; +out vec4 fs_Nor; // The array of normals that has been transformed by u_ModelInvTr. This is implicitly passed to the fragment shader. +out vec4 fs_LightVec; // The direction in which our virtual light lies, relative to each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_Col; +out vec4 fs_Pos; // The color of each vertex. This is implicitly passed to the fragment shader. +out vec4 fs_CamPos; +const vec4 lightPos = vec4(5, 5, 3, 1); //The position of our virtual light, which is used to compute the shading of + //the geometry in the fragment shader. + + +float random1( vec3 p ) { + return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453); +} + +vec3 random3( vec3 p ) { + return fract(sin(vec3(dot(p,vec3(127.1, 311.7, 191.999)), + dot(p,vec3(269.5, 183.3, 765.54)), + dot(p, vec3(420.69, 631.2,109.21)))) + *43758.5453); +} + +float mySmootherStep(float a, float b, float t) { + t = t*t*t*(t*(t*6.0 - 15.0) + 10.0); + return mix(a, b, t); +} + +float interpNoise3D1(vec3 p) { + vec3 pFract = fract(p); + float llb = random1(floor(p)); + float lrb = random1(floor(p) + vec3(1.0,0.0,0.0)); + float ulb = random1(floor(p) + vec3(0.0,1.0,0.0)); + float urb = random1(floor(p) + vec3(1.0,1.0,0.0)); + + float llf = random1(floor(p) + vec3(0.0,0.0,1.0)); + float lrf = random1(floor(p) + vec3(1.0,0.0,1.0)); + float ulf = random1(floor(p) + vec3(0.0,1.0,1.0)); + float urf = random1(floor(p) + vec3(1.0,1.0,1.0)); + + float lerpXLB = mySmootherStep(llb, lrb, pFract.x); + float lerpXHB = mySmootherStep(ulb, urb, pFract.x); + float lerpXLF = mySmootherStep(llf, lrf, pFract.x); + float lerpXHF = mySmootherStep(ulf, urf, pFract.x); + + float lerpYB = mySmootherStep(lerpXLB, lerpXHB, pFract.y); + float lerpYF = mySmootherStep(lerpXLF, lerpXHF, pFract.y); + + return mySmootherStep(lerpYB, lerpYF, pFract.z); +} +float fbm(vec3 newP, float octaves) { + float amp = 0.5; + float freq = 6.0; + float sum = 0.0; + float maxSum = 0.0; + for(float i = 0.0; i < 10.0; ++i) { + if(i == octaves) + break; + maxSum += amp; + sum += interpNoise3D1(newP * freq) * amp; + amp *= 0.5; + freq *= 2.0; + } + return (sum / maxSum); +} + +//worley noise +float WorleyNoise(vec3 pos) +{ + pos *= 3.0; + vec3 uvInt = floor(pos); + vec3 uvFract = fract(pos); + float minDist = 1.0; + vec3 closeOne; + for(int z = -1; z <= 1; z++) + { + for(int y = -1; y <= 1; ++y) + { + for(int x = -1; x <= 1; ++x) + { + vec3 neighbor = vec3(float(x), float(y), float(z)); + vec3 point = random3(uvInt + neighbor); + vec3 diff = neighbor + point - uvFract; + float dist = length(diff); + //finding the point that is the closest random point + if(dist < minDist) + { + //getting the point into the correct uv coordinate space + minDist = dist; + //closeOne = (uvInt + neighbor + point) / 8.0; + } + + + } + } + } + return minDist; + // return clamp(minDist, .1f, 1.f); + //return vec3(0.0, 0.f, 0.f); +} + +float surflet(vec3 p, vec3 gridPoint) { + // Compute the distance between p and the grid point along each axis, and warp it with a + // quintic function so we can smooth our cells + vec3 t2 = abs(p - gridPoint); + vec3 pow1 = vec3(pow(t2.x, 5.f), pow(t2.y, 5.f), 
pow(t2.z, 5.f)); + vec3 pow2 = vec3(pow(t2.x, 4.f), pow(t2.y, 4.f), pow(t2.z, 4.f)); + vec3 pow3 = vec3(pow(t2.x, 3.f), pow(t2.y, 3.f), pow(t2.z, 3.f)); + vec3 t = vec3(1.f) - 6.f * pow1 + + 15.f * pow2 + - 10.f * pow3; + // Get the random vector for the grid point (assume we wrote a function random2 + // that returns a vec2 in the range [0, 1]) + vec3 gradient = random3(gridPoint) * 2.f - vec3(1,1,1); + // Get the vector from the grid point to P + vec3 diff = p - gridPoint; + + float height = dot(diff, gradient); + // Scale our height field (i.e. reduce it) by our polynomial falloff function + return height * t.x * t.y * t.z; +} + + +//PERLIN NOISE + +float perlinNoise3D(vec3 p) { + float surfletSum = 0.f; + // Iterate over the four integer corners surrounding uv + for(int dx = 0; dx <= 1; ++dx) { + for(int dy = 0; dy <= 1; ++dy) { + for (int dz = 0; dz <= 1; ++dz) { + surfletSum += surflet(p, floor(p) + vec3(dx, dy, dz)); + } + } + } + return surfletSum; +} +float fbmMoutain(vec3 p){ + + float total = 0.f; + float persistence = 0.5f; + int octaves = 2; + float start = 1.f; + float freq = 1.f; + float max = 0.f; + float amp = 0.7f; + //increasing the layers of noise with every octave + for(int i = 1; i <= octaves; i++) { + //freq = pow(2.f, i); + //float amp = pow(persistence, i); + max += amp; + vec3 vec = p * freq; + total += (abs(perlinNoise3D(vec))); + total = total * start; + start = total; + //total += n * amp; + + freq *= 2.f; + amp *= 0.5; + } + return total / max; + + } +vec3 IDF(vec3 vec){ + + vec3 total = vec3(perlinNoise3D(vec), perlinNoise3D(vec + vec3(-1.2, 1.5, 1)), perlinNoise3D(vec + vec3(.2, -.5, 1.0))); + + vec3 totalSmall = (total + vec3(1.f)) * 0.5f; + //smooth stepping so that the biomes blend smoothly + totalSmall = smoothstep(0.f, 1.f, (smoothstep(0.25f, 0.75f, totalSmall))); + // totalSmall = (glm::smoothstep(0.25f, 0.75f, totalSmall)); + return totalSmall; + } + +float hills(vec3 p) +{ + float h = perlinNoise3D(p * 3.0); + h = clamp(h, 0.f, .03); + return h; +} + +float moutains(vec3 p) +{ + float m = fbm(p * .8, 3.0); + m = pow(m, 3.1); + m -= .12; + m = clamp(m, 0.f, 1.f); + return m; +} + +float plateau(vec3 pos) +{ + float p = fbmMoutain(pos * 4.50); + p = pow(p, 1.3f); + p = clamp(p, 0.f, .2); + return p; +} + +//float +float bias(float b, float t) +{ + return pow(t, log(b) / log(0.5f)); +} + +float ocean(vec3 pos) +{ + float o = perlinNoise3D(pos * 3.8); + //o = pow(o, 2.0f); + o = bias(o, .4f); + o -= .09; + o = clamp(o, 0.f, .02f); + return o; +} + +float getHeight(vec3 pos) +{ + float h = hills(pos * .5); + //float p = fbm(vs_Pos.xyz * .4, 4.0); + //float p = fbmMoutain(vs_Pos.xyz * 6.50); + + + float m = moutains(pos * .6); + float p = plateau(pos * .4); + float i = ocean(pos); + + //p = mix(h, p, .2); + //float a = abs(perlinNoise3D(vs_Pos.xyz * 3.0)) - .2; + //a = clamp(a, -0.1f, .05f); + + //starting idf + vec3 idf = IDF(pos * .5); + float mixPlatReg = mix(p, i, idf.x); + float mixMoutHill = mix(m, h, idf.x); + float total = mix(mixMoutHill, mixPlatReg, idf.z); + return total; +} + + +vec3 getNormal(float total) +{ + vec3 tangent = cross(vec3(0.f, 1.f, 0.f), fs_Nor.xyz); + vec3 bitangent = cross(fs_Nor.xyz, tangent); + + vec3 newPt1 = vs_Pos.xyz + tangent * .1; + vec3 newPt2 = vs_Pos.xyz + bitangent * .1; + + float a = total; + float b = getHeight(newPt1); + float c = getHeight(newPt2); + + vec3 aPt = vs_Pos.xyz + fs_Nor.xyz * a; + vec3 bPt = newPt1 + fs_Nor.xyz * b; + vec3 cPt = newPt2 + fs_Nor.xyz * c; + + vec3 final = cross(normalize(aPt - bPt), 
normalize(aPt - cPt)); + return final; + +} +void main() +{ + fs_Col = vs_Col; // Pass the vertex colors to the fragment shader for interpolation + fs_Pos = vs_Pos; + old = vs_Pos; + mat3 invTranspose = mat3(u_ModelInvTr); + + fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0); // Pass the vertex normals to the fragment shader for interpolation. + // model matrix. This is necessary to ensure the normals remain + + float total = getHeight(vs_Pos.xyz); + fs_Pos += fs_Nor * total; + + fs_Nor = vec4(getNormal(total), 0.f); + //fs_Pos += fs_Nor * vec4(w, w, w, 1); + // fs_Nor.x = WorleyNoise(fs_Pos.xyz + vec3(.0001, 0.f, 0.f)) - WorleyNoise(fs_Pos.xyz - vec3(.0001, 0.0, 0.0)); + // fs_Nor.y = WorleyNoise(fs_Pos.xyz + vec3(0.f, .0001, 0.f)) - WorleyNoise(fs_Pos.xyz - vec3(0.0, .0001, 0.0)); + // fs_Nor.z = WorleyNoise(fs_Pos.xyz + vec3(0.f, 0.f, .0001)) - WorleyNoise(fs_Pos.xyz - vec3(0.0, 0.0, .0001)); + + //vs_Pos.x = vs_Pos.x * p; + + // perpendicular to the surface after the surface is transformed by + // the model matrix. + + + vec4 modelposition = u_Model * (fs_Pos); // Temporarily store the transformed vertex positions for use below + + vec4 modelposition2 = vec4(.5, 0, 0, 0); + // modelposition2.xyz = modelposition.xyz + (sin(modelposition.xyz) * 1.05) + (sin(modelposition.xyz) * .25); + //modelposition2.yz = modelposition.yz + (cos(modelposition.yz) * .35); + //modelposition2 + //modelposition2.y = modelposition2.y + (sin(modelposition2.y) * .20); + //modelposition.xyz = mix(modelposition.xyz, modelposition2.xyz, t); + gl_Position = u_ViewProj * modelposition; + fs_LightVec = lightPos - modelposition; // Compute the direction in which the light source lies + fs_CamPos = vec4(u_CamPos, 0.f); + //gl_Position = u_ViewProj * modelposition;// gl_Position is a built-in variable of OpenGL which is + // used to render the final positions of the geometry's vertices +}
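For reference, the terrain height in `planet-vert.glsl` is a two-level blend: the plateau/ocean and mountain/hill pairs are each mixed by `idf.x`, and the two results are mixed by `idf.z`, where `idf` is smooth-stepped Perlin noise. A compact sketch of that blend structure; the four height callbacks are stand-ins for the shader's `hills`, `moutains`, `plateau`, and `ocean` functions:

```typescript
// Sketch of the biome blend in planet-vert.glsl: four height fields combined
// with two interpolation factors derived from low-frequency noise.
type HeightFn = (x: number, y: number, z: number) => number;

function blendHeight(
  hills: HeightFn, mountains: HeightFn, plateau: HeightFn, ocean: HeightFn,
  idfX: number, idfZ: number,
  x: number, y: number, z: number
): number {
  const mix = (a: number, b: number, t: number) => a * (1 - t) + b * t;
  const plateauOrOcean = mix(plateau(x, y, z), ocean(x, y, z), idfX);
  const mountainOrHill = mix(mountains(x, y, z), hills(x, y, z), idfX);
  return mix(mountainOrHill, plateauOrOcean, idfZ);
}
```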