Basic fog lighting, refactoring

This commit is contained in:
Lauri Räsänen 2023-11-09 21:04:59 +02:00
parent 5a8dec8d02
commit 1c0b9aa63f
10 changed files with 203 additions and 124 deletions

View file

@ -9,3 +9,5 @@ const FOG_MAX_STEPS = 20;
const FOG_MAX_DIST = 300.0;
const FOG_SCALE = 0.01;
const FOG_DENSITY = 1.0;
const FOG_ALPHA = 1.0;
const FOG_BLEND_DIST = 10.0;

View file

@ -1,10 +1,11 @@
#include globals.wgsl
#include constants.wgsl
#include globals.wgsl
#include light.wgsl
#include noise.wgsl
struct FogVertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) world_position: vec3<f32>,
@location(0) world_position: vec4<f32>,
@location(1) light_world_position: vec3<f32>,
}
@ -26,7 +27,7 @@ fn vs_main(
var out: FogVertexOutput;
out.clip_position = camera.proj * camera.view * world_position;
out.world_position = world_position.xyz / world_position.w;
out.world_position = world_position;
out.light_world_position = light.position;
return out;
@ -72,9 +73,11 @@ fn ray_march(origin: vec3<f32>, direction: vec3<f32>, scene_depth: f32) -> f32 {
var depth = 0.0;
for (var i = 0; i < FOG_MAX_STEPS; i++)
{
let noise = fog_noise(origin + direction * depth);
depth += FOG_MAX_DIST / f32(FOG_MAX_STEPS);
let p = origin + direction * depth;
density += fog_noise(p) * FOG_DENSITY / f32(FOG_MAX_STEPS);
let blend = min(depth / FOG_BLEND_DIST, 1.0);
let contribution = FOG_DENSITY / f32(FOG_MAX_STEPS);
density += blend * noise * contribution;
if (density >= 1.0)
{
density = 1.0;
@ -88,7 +91,6 @@ fn ray_march(origin: vec3<f32>, direction: vec3<f32>, scene_depth: f32) -> f32 {
return density;
}
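For reference, a reading of the ray_march hunk above: each iteration samples fog_noise at the current position, advances the depth by FOG_MAX_DIST / FOG_MAX_STEPS, and accumulates a per-step contribution of FOG_DENSITY / FOG_MAX_STEPS scaled by a blend factor that fades the fog in over the first FOG_BLEND_DIST units, clamping the total at 1. A minimal CPU-side sketch of that accumulation (Rust; noise_at is a stand-in for the shader's fog_noise, and the sample/step ordering is an assumption based on the diff):

// Sketch of the fog ray march; constants mirror constants.wgsl.
const FOG_MAX_STEPS: usize = 20;
const FOG_MAX_DIST: f32 = 300.0;
const FOG_DENSITY: f32 = 1.0;
const FOG_BLEND_DIST: f32 = 10.0;

fn noise_at(_p: [f32; 3]) -> f32 {
    0.5 // placeholder for the shader's fog_noise()
}

fn ray_march(origin: [f32; 3], dir: [f32; 3]) -> f32 {
    let mut density = 0.0_f32;
    let mut depth = 0.0_f32;
    for _ in 0..FOG_MAX_STEPS {
        let p = [
            origin[0] + dir[0] * depth,
            origin[1] + dir[1] * depth,
            origin[2] + dir[2] * depth,
        ];
        let noise = noise_at(p);
        depth += FOG_MAX_DIST / FOG_MAX_STEPS as f32;
        // fade fog in near the ray origin, then accumulate and clamp
        let blend = (depth / FOG_BLEND_DIST).min(1.0);
        density += blend * noise * FOG_DENSITY / FOG_MAX_STEPS as f32;
        if density >= 1.0 {
            return 1.0;
        }
    }
    density
}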
// FIXME: always 0???
fn scene_depth(clip_position: vec4<f32>) -> f32 {
if (clip_position.w <= 0.0) {
return 0.0;
@ -96,24 +98,76 @@ fn scene_depth(clip_position: vec4<f32>) -> f32 {
let ndc = clip_position.xy / clip_position.w;
let uv = ndc * vec2<f32>(0.5, -0.5) + vec2<f32>(0.5, 0.5);
return textureSample(t_geometry_depth, s_geometry_depth, uv);
let depth = textureSample(t_geometry_depth, s_geometry_depth, uv);
// convert to linear [near, far] range
let z_near = camera.planes.x;
let z_far = camera.planes.y;
return z_near * z_far / (z_far + depth * (z_near - z_far));
}
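The new tail of scene_depth converts the raw depth-buffer sample back into a linear distance using the near/far planes now carried in camera.planes. A quick Rust sketch of the same formula; the 1.0 / 3000.0 plane values are an assumption taken from the hard-coded path removed further down:

// Linearize a [0, 1] depth-buffer value for a standard perspective projection.
fn linearize_depth(depth: f32, z_near: f32, z_far: f32) -> f32 {
    z_near * z_far / (z_far + depth * (z_near - z_far))
}

fn main() {
    let (near, far) = (1.0_f32, 3000.0_f32);
    assert!((linearize_depth(0.0, near, far) - near).abs() < 1e-3); // near plane
    assert!((linearize_depth(1.0, near, far) - far).abs() < 1e-1); // far plane
}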
@fragment
fn fs_main(vert: FogVertexOutput) -> @location(0) vec4<f32> {
var color = vec4<f32>(0.5, 0.5, 0.5, 1.0);
let cam_to_volume = vert.world_position.xyz - camera.position.xyz;
let distance_to_volume = length(cam_to_volume);
let direction = cam_to_volume / distance_to_volume;
// TODO: pass near and far plane in uniforms
let geometry_depth = scene_depth(vert.clip_position) * (3000.0 - 1.0) + 1.0 - distance_to_volume;
if (geometry_depth <= 0.0)
{
return vec4<f32>(0.0);
}
// FIXME: t_geometry_depth is 0
// let geometry_depth = scene_depth(vert.clip_position) - distance_to_volume;
// if (geometry_depth <= 0.0)
// {
// return vec4<f32>(0.0);
// }
let geometry_depth = 3000.0;
let density = ray_march(vert.world_position.xyz, direction, geometry_depth);
color.a *= density;
return color;
var in_light = 0.0;
if (global_uniforms.use_shadowmaps > 0u) {
for (var i: i32 = 0; i < 6; i++) {
let light_coords = light.matrices[i] * vert.world_position;
let light_dir = normalize(light_coords.xyz);
let bias = 0.01;
// z can never be smaller than this inside a 90 degree frustum
if (light_dir.z < INV_SQRT_3 - bias) {
continue;
}
// x and y can never be larger than this inside the frustum
if (abs(light_dir.y) > INV_SQRT_2 + bias) {
continue;
}
if (abs(light_dir.x) > INV_SQRT_2 + bias) {
continue;
}
in_light = sample_direct_light(i, light_coords);
// TODO: should break even if 0 since we're inside the frustum.
// See if this causes issues with bias overlap between directions.
if (in_light > 0.0) {
break;
}
}
} else {
in_light = 1.0;
}
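The early-out tests above follow from the geometry of a 90-degree shadow frustum: for a normalized direction inside a face, |x| <= z and |y| <= z, so z is at least 1/sqrt(3) (the corner direction) and |x|, |y| are at most 1/sqrt(2) (the edge direction); the bias widens the bounds slightly so adjacent faces overlap. A small Rust sketch of the same containment test (INV_SQRT_2 and INV_SQRT_3 are assumed to be defined in constants.wgsl with these values):

// Containment test for one shadow-map face; `dir` is the normalized
// direction in that face's light space.
const INV_SQRT_2: f32 = 0.70710678;
const INV_SQRT_3: f32 = 0.57735027;

fn inside_face(dir: [f32; 3], bias: f32) -> bool {
    // z can never be smaller than 1/sqrt(3) inside a 90-degree frustum
    if dir[2] < INV_SQRT_3 - bias {
        return false;
    }
    // x and y can never exceed 1/sqrt(2) inside the frustum
    dir[0].abs() <= INV_SQRT_2 + bias && dir[1].abs() <= INV_SQRT_2 + bias
}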
var color = vec3<f32>(0.5, 0.5, 0.5);
let ambient_strength = 0.02;
let ambient_color = color * ambient_strength;
var radiance = vec3<f32>(0.0);
if (in_light > 0.0) {
// attenuation
let light_dist = length(light.position - vert.world_position.xyz);
let coef_a = 0.0;
let coef_b = 1.0;
let light_attenuation = 1.0 / (1.0 + coef_a * light_dist + coef_b * light_dist * light_dist);
radiance = light.color.rgb * light.color.a * light_attenuation * in_light;
}
var result = ambient_color + radiance;
// tonemap
result = result / (result + vec3(1.0));
return vec4(result, density * FOG_ALPHA);
}
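The fog lighting added above is a cut-down version of the geometry-pass shading: a constant grey fog colour with a small ambient term, plus the light colour scaled by the shadow factor and an inverse-square attenuation (coef_a = 0, coef_b = 1 gives 1/(1 + d^2)), finished with a Reinhard tonemap x/(x + 1). A CPU-side Rust sketch of that combination, with illustrative inputs only:

// Ambient + attenuated point-light radiance + Reinhard tonemap, as in fs_main.
fn fog_lighting(light_color: [f32; 3], light_intensity: f32,
                light_dist: f32, in_light: f32) -> [f32; 3] {
    let fog_albedo = [0.5_f32, 0.5, 0.5];
    let ambient_strength = 0.02_f32;
    // 1 / (1 + a*d + b*d^2) with a = 0.0, b = 1.0
    let attenuation = 1.0 / (1.0 + light_dist * light_dist);

    let mut out = [0.0_f32; 3];
    for i in 0..3 {
        let radiance = light_color[i] * light_intensity * attenuation * in_light;
        let lit = fog_albedo[i] * ambient_strength + radiance;
        out[i] = lit / (lit + 1.0); // Reinhard tonemap
    }
    out
}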

View file

@ -5,6 +5,7 @@ struct CameraUniform {
proj: mat4x4<f32>,
inv_view_proj: mat4x4<f32>,
position: vec4<f32>,
planes: vec4<f32>,
}
@group(0) @binding(0)
var<uniform> camera: CameraUniform;

View file

@ -1,26 +1,29 @@
#include globals.wgsl
fn sample_direct_light(index: i32, light_coords: vec4<f32>) -> f32 {
if (light_coords.w <= 0.0) {
return 0.0;
}
struct LightVertexInput {
@location(0) position: vec3<f32>,
};
let flip_correction = vec2<f32>(0.5, -0.5);
let proj_correction = 1.0 / light_coords.w;
let light_local = light_coords.xy * flip_correction * proj_correction + vec2<f32>(0.5, 0.5);
let bias = 0.000001;
let reference_depth = light_coords.z * proj_correction - bias;
struct LightVertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) color: vec3<f32>,
};
var total_sample = 0.0;
for (var x: i32 = -SHADOW_SAMPLES; x < SHADOW_SAMPLES; x++) {
for (var y: i32 = -SHADOW_SAMPLES; y < SHADOW_SAMPLES; y++) {
let texelSize = vec2<f32>(textureDimensions(t_light_depth));
let offset = vec2<f32>(f32(x), f32(y)) / texelSize.xy;
let s = textureSampleCompare(
t_light_depth,
s_light_depth,
light_local + offset,
index,
reference_depth
);
total_sample += s * INV_SHADOW_SAMPLES;
}
}
@vertex
fn vs_main(
model: LightVertexInput,
) -> LightVertexOutput {
let scale = 10.0;
var out: LightVertexOutput;
out.clip_position = camera.proj * camera.view * vec4<f32>(model.position * scale + light.position, 1.0);
out.color = light.color.xyz;
return out;
}
@fragment
fn fs_main(in: LightVertexOutput) -> @location(0) vec4<f32> {
return vec4<f32>(in.color, 1.0);
return total_sample;
}
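sample_direct_light, moved into light.wgsl so the fog shader can share it, is a plain PCF filter: project into light space, apply a small depth bias, then average hardware depth comparisons over a (2 * SHADOW_SAMPLES)^2 texel neighbourhood. A rough CPU analogue in Rust; the compare closure stands in for textureSampleCompare, and the SHADOW_SAMPLES value here is an assumption (the real constant, along with INV_SHADOW_SAMPLES, lives in constants.wgsl):

// PCF-style average of shadow comparisons around a UV coordinate.
// `texel` is one texel in UV units (the shader divides by the texture
// dimensions instead); `compare` returns 1.0 where the depth test passes.
const SHADOW_SAMPLES: i32 = 2; // assumed; must match INV_SHADOW_SAMPLES

fn pcf<F: Fn(f32, f32) -> f32>(uv: (f32, f32), texel: (f32, f32), compare: F) -> f32 {
    let side = 2 * SHADOW_SAMPLES;
    let weight = 1.0 / (side * side) as f32; // shader: INV_SHADOW_SAMPLES
    let mut total = 0.0;
    for x in -SHADOW_SAMPLES..SHADOW_SAMPLES {
        for y in -SHADOW_SAMPLES..SHADOW_SAMPLES {
            total += compare(uv.0 + x as f32 * texel.0, uv.1 + y as f32 * texel.1) * weight;
        }
    }
    total
}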

View file

@ -0,0 +1,26 @@
#include globals.wgsl
struct LightVertexInput {
@location(0) position: vec3<f32>,
};
struct LightVertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) color: vec3<f32>,
};
@vertex
fn vs_main(
model: LightVertexInput,
) -> LightVertexOutput {
let scale = 10.0;
var out: LightVertexOutput;
out.clip_position = camera.proj * camera.view * vec4<f32>(model.position * scale + light.position, 1.0);
out.color = light.color.xyz;
return out;
}
@fragment
fn fs_main(in: LightVertexOutput) -> @location(0) vec4<f32> {
return vec4<f32>(in.color, 1.0);
}

View file

@ -1,5 +1,6 @@
#include constants.wgsl
#include globals.wgsl
#include light.wgsl
#include brdf.wgsl
// Vertex shader
@ -66,36 +67,6 @@ var t_roughness_metalness: texture_2d<f32>;
@group(3) @binding(5)
var s_roughness_metalness: sampler;
fn sample_direct_light(index: i32, light_coords: vec4<f32>) -> f32 {
if (light_coords.w <= 0.0) {
return 0.0;
}
let flip_correction = vec2<f32>(0.5, -0.5);
let proj_correction = 1.0 / light_coords.w;
let light_local = light_coords.xy * flip_correction * proj_correction + vec2<f32>(0.5, 0.5);
let bias = 0.000001;
let reference_depth = light_coords.z * proj_correction - bias;
var total_sample = 0.0;
for (var x: i32 = -SHADOW_SAMPLES; x < SHADOW_SAMPLES; x++) {
for (var y: i32 = -SHADOW_SAMPLES; y < SHADOW_SAMPLES; y++) {
let texelSize = vec2<f32>(textureDimensions(t_light_depth));
let offset = vec2<f32>(f32(x), f32(y)) / texelSize.xy;
let s = textureSampleCompare(
t_light_depth,
s_light_depth,
light_local + offset,
index,
reference_depth
);
total_sample += s * INV_SHADOW_SAMPLES;
}
}
return total_sample;
}
@fragment
fn fs_main(vert: VertexOutput) -> @location(0) vec4<f32> {
// textures
@ -159,7 +130,7 @@ fn fs_main(vert: VertexOutput) -> @location(0) vec4<f32> {
// radiance
let radiance_strength = max(dot(normal_dir, light_dir), 0.0);
let radiance = radiance_strength * light.color.xyz * light.color.w * light_attenuation * in_light;
let radiance = radiance_strength * light.color.rgb * light.color.a * light_attenuation * in_light;
// brdf shading
total_radiance += radiance * brdf(

View file

@ -108,6 +108,7 @@ pub struct CameraUniform {
pub proj: [[f32; 4]; 4],
pub inv_view_proj: [[f32; 4]; 4],
pub position: [f32; 4],
pub planes: [f32; 4],
}
impl CameraUniform {
@ -117,6 +118,7 @@ impl CameraUniform {
proj: cgmath::Matrix4::identity().into(),
inv_view_proj: cgmath::Matrix4::identity().into(),
position: [0.0; 4],
planes: [NEAR_PLANE, FAR_PLANE, 0.0, 0.0],
}
}

View file

@ -20,6 +20,7 @@ impl RenderPass {
label: &str,
is_shadow: bool,
has_transparency: bool,
write_depth: bool,
) -> Self {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some((label.to_owned() + " pipeline Layout").as_str()),
@ -40,6 +41,7 @@ impl RenderPass {
label,
is_shadow,
has_transparency,
write_depth,
);
Self { pipeline }
@ -55,6 +57,7 @@ impl RenderPass {
label: &str,
is_shadow: bool,
has_transparency: bool,
write_depth: bool,
) -> wgpu::RenderPipeline {
let shader = device.create_shader_module(shader);
@ -108,7 +111,7 @@ impl RenderPass {
},
depth_stencil: depth_format.map(|format| wgpu::DepthStencilState {
format,
depth_write_enabled: true,
depth_write_enabled: write_depth,
depth_compare: if is_shadow { wgpu::CompareFunction::LessEqual } else { wgpu::CompareFunction::Less },
stencil: wgpu::StencilState::default(),
bias: if is_shadow {

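The new write_depth flag changes exactly one thing in the pipeline: whether the depth attachment is written. Transparent passes can keep depth-testing against the geometry depth buffer without overwriting it. A small sketch of the resulting DepthStencilState mapping, assuming a recent wgpu (the shadow-pass bias branch from the real code is elided here):

// How the write_depth / is_shadow flags end up in the pipeline descriptor.
fn depth_state(format: wgpu::TextureFormat, is_shadow: bool, write_depth: bool) -> wgpu::DepthStencilState {
    wgpu::DepthStencilState {
        format,
        depth_write_enabled: write_depth,
        depth_compare: if is_shadow {
            wgpu::CompareFunction::LessEqual
        } else {
            wgpu::CompareFunction::Less
        },
        stencil: wgpu::StencilState::default(),
        bias: wgpu::DepthBiasState::default(), // the real code biases shadow passes
    }
}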
View file

@ -1,5 +1,4 @@
use cgmath::prelude::*;
use wgpu::{InstanceDescriptor, Backends, TextureView, TextureViewDescriptor, StoreOp};
use std::default::Default;
use std::mem;
use std::time::Duration;
@ -55,19 +54,18 @@ pub struct State {
light_bind_group: wgpu::BindGroup,
depth_bind_group: wgpu::BindGroup,
light_depth_pass: RenderPass,
light_depth_texture_target_views: [TextureView; SHADOW_MAP_LAYERS as usize],
light_depth_texture_target_views: [wgpu::TextureView; SHADOW_MAP_LAYERS as usize],
global_uniforms: GlobalUniforms,
global_uniforms_buffer: wgpu::Buffer,
}
impl State {
// Creating some of the wgpu types requires async code
pub async fn new(window: &Window) -> Self {
log::info!("Creating surface");
let mut size = window.inner_size();
size.width = size.width.max(1);
size.height = size.height.max(1);
let instance = wgpu::Instance::new(InstanceDescriptor { backends: Backends::PRIMARY | Backends::GL, ..Default::default() });
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { backends: wgpu::Backends::PRIMARY | wgpu::Backends::GL, ..Default::default() });
let surface = unsafe { instance.create_surface(window) }.unwrap();
let adapter = instance
@ -109,7 +107,6 @@ impl State {
surface.configure(&device, &config);
// Camera
let camera = Camera::new(
(-500.0, 150.0, 0.0).into(),
0.0,
@ -155,15 +152,14 @@ impl State {
let geometry_depth_texture = Texture::create_depth_texture(
&device,
"geometry_depth_texture",
Some(wgpu::CompareFunction::Less),
None,
config.width,
config.height,
1,
wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
true,
);
let geometry_abs_depth_sampler = Texture::create_sampler(&device, None);
let light_depth_texture = Texture::create_depth_texture(
&device,
"light_depth_texture",
@ -172,15 +168,16 @@ impl State {
SHADOW_MAP_SIZE,
SHADOW_MAP_LAYERS,
wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
true,
);
let light_depth_texture_target_views = (0..SHADOW_MAP_LAYERS)
.map(|i| {
light_depth_texture.texture.create_view(&TextureViewDescriptor {
light_depth_texture.texture.create_view(&wgpu::TextureViewDescriptor {
label: Some("light_depth_texture_view"),
format: None,
dimension: Some(wgpu::TextureViewDimension::D2),
aspect: wgpu::TextureAspect::All,
aspect: wgpu::TextureAspect::DepthOnly,
base_mip_level: 0,
mip_level_count: None,
base_array_layer: i,
@ -193,7 +190,6 @@ impl State {
let light_uniform = LightUniform::new([0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 250000.0]);
// We'll want to update our light's position, so we use COPY_DST
let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Light UB"),
contents: bytemuck::cast_slice(&[light_uniform]),
@ -316,7 +312,7 @@ impl State {
},
wgpu::BindGroupEntry {
binding: 3,
resource: wgpu::BindingResource::Sampler(&geometry_abs_depth_sampler),
resource: wgpu::BindingResource::Sampler(&geometry_depth_texture.sampler),
},
],
label: Some("Depth Bind Group"),
@ -448,6 +444,7 @@ impl State {
"light depth pass",
true,
false,
true,
);
let geometry_pass = RenderPass::new(
@ -466,6 +463,21 @@ impl State {
"geometry pass",
false,
false,
true,
);
let light_debug_pass = RenderPass::new(
&device,
&[&camera_bind_group_layout, &light_bind_group_layout],
&[],
"light_debug.wgsl",
Some(config.format),
Some(Texture::DEPTH_FORMAT),
&[ModelVertex::desc()],
"light debug pass",
false,
false,
true,
);
let fog_pass = RenderPass::new(
@ -484,18 +496,6 @@ impl State {
"fog pass",
false,
true,
);
let light_debug_pass = RenderPass::new(
&device,
&[&camera_bind_group_layout, &light_bind_group_layout],
&[],
"light.wgsl",
Some(config.format),
Some(Texture::DEPTH_FORMAT),
&[ModelVertex::desc()],
"light debug pass",
false,
false,
);
@ -544,11 +544,12 @@ impl State {
self.geometry_depth_texture = Texture::create_depth_texture(
&self.device,
"geometry_depth_texture",
Some(wgpu::CompareFunction::Less),
None,
self.config.width,
self.config.height,
1,
wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
true,
);
}
}
@ -620,7 +621,7 @@ impl State {
view: &self.light_depth_texture_target_views[i],
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: StoreOp::Store,
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
@ -645,15 +646,15 @@ impl State {
let surface_view = surface_texture
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
let mut geometry_encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
encoder.push_debug_group("geometry pass");
geometry_encoder.push_debug_group("geometry pass");
{
let mut geom_render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
let mut geom_render_pass = geometry_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Geometry Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &surface_view,
@ -665,14 +666,14 @@ impl State {
b: 0.0,
a: 1.0,
}),
store: StoreOp::Store,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &self.geometry_depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: StoreOp::Store,
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
@ -688,26 +689,26 @@ impl State {
[&self.camera_bind_group, &self.light_bind_group, &self.depth_bind_group].into(),
);
}
encoder.pop_debug_group();
geometry_encoder.pop_debug_group();
encoder.push_debug_group("debug light pass");
geometry_encoder.push_debug_group("debug light pass");
{
let mut light_debug_render_pass =
encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
geometry_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Light Debug Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &surface_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: StoreOp::Store,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &self.geometry_depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Load,
store: StoreOp::Store,
store: wgpu::StoreOp::Store,
}),
stencil_ops: None,
}),
@ -722,26 +723,31 @@ impl State {
&self.light_bind_group,
);
}
encoder.pop_debug_group();
geometry_encoder.pop_debug_group();
encoder.push_debug_group("fog pass");
self.queue.submit(std::iter::once(geometry_encoder.finish()));
let mut fog_encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Fog Encoder"),
});
fog_encoder.push_debug_group("fog pass");
{
let mut fog_render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
let mut fog_render_pass = fog_encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Fog Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &surface_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: StoreOp::Store,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &self.geometry_depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Load,
store: StoreOp::Store,
}),
depth_ops: None,
stencil_ops: None,
}),
timestamp_writes: None,
@ -756,9 +762,10 @@ impl State {
[&self.camera_bind_group, &self.light_bind_group, &self.depth_bind_group].into(),
);
}
encoder.pop_debug_group();
fog_encoder.pop_debug_group();
self.queue.submit(std::iter::once(fog_encoder.finish()));
self.queue.submit(std::iter::once(encoder.finish()));
surface_texture.present();
Ok(())

View file

@ -18,6 +18,7 @@ impl Texture {
height: u32,
layers: u32,
usage: wgpu::TextureUsages,
filter: bool,
) -> Self {
let size = wgpu::Extent3d {
width,
@ -36,8 +37,17 @@ impl Texture {
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = Texture::create_sampler(device, compare);
let view = texture.create_view(&wgpu::TextureViewDescriptor {
label: Some(&format!("{:?}_view", label)),
format: Some(Self::DEPTH_FORMAT),
dimension: if layers > 1 { Some(wgpu::TextureViewDimension::D2Array) } else { Some(wgpu::TextureViewDimension::D2) },
aspect: wgpu::TextureAspect::DepthOnly,
base_mip_level: 0,
mip_level_count: None,
base_array_layer: 0,
array_layer_count: if layers > 1 { Some(layers) } else { None },
});
let sampler = Texture::create_sampler(device, compare, filter);
Self {
texture,
@ -46,13 +56,13 @@ impl Texture {
}
}
pub fn create_sampler(device: &wgpu::Device, compare: Option<wgpu::CompareFunction>) -> wgpu::Sampler {
pub fn create_sampler(device: &wgpu::Device, compare: Option<wgpu::CompareFunction>, filter: bool) -> wgpu::Sampler {
device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mag_filter: if filter { wgpu::FilterMode::Linear } else { wgpu::FilterMode::Nearest },
min_filter: if filter { wgpu::FilterMode::Linear } else { wgpu::FilterMode::Nearest },
mipmap_filter: wgpu::FilterMode::Nearest,
compare,
lod_min_clamp: 0.0,