forestiles/src/graphics.rs
2024-09-08 20:03:55 +02:00

266 lines
9.8 KiB
Rust

use std::sync::Arc;
use bytemuck::{Pod, Zeroable};
use wgpu::{include_wgsl, util::DeviceExt, BindGroup, Buffer, Device, Queue, RenderPipeline, Surface, SurfaceConfiguration, VertexBufferLayout};
use winit::{event::{Event, WindowEvent}, window::Window};
use crate::{state::State, App};
/// A single 2-D vertex as consumed by `shader.wgsl`.
///
/// `#[repr(C)]` plus `Pod`/`Zeroable` make it safe to byte-cast slices of
/// vertices straight into a GPU buffer with `bytemuck`.
#[repr(C)]
#[derive(Clone, Copy, Zeroable, Pod, Debug)]
pub struct Vertex {
    /// Position, (x, y).
    pub pos: [f32; 2],
    /// RGBA color.
    pub color: [f32; 4]
}

impl Vertex {
    /// Vertex-buffer layout: shader location 0 = `pos` (vec2<f32>),
    /// location 1 = `color` (vec4<f32>), advancing once per vertex.
    const DESC: VertexBufferLayout<'static> = VertexBufferLayout {
        array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
        step_mode: wgpu::VertexStepMode::Vertex,
        attributes: &[
            wgpu::VertexAttribute {
                format: wgpu::VertexFormat::Float32x2,
                offset: 0,
                shader_location: 0,
            },
            wgpu::VertexAttribute {
                // `color` starts right after the two position floats.
                format: wgpu::VertexFormat::Float32x4,
                offset: std::mem::size_of::<[f32; 2]>() as wgpu::BufferAddress,
                shader_location: 1,
            },
        ],
    };
}
/// CPU-side mirror of the shader's uniform block.
///
/// `#[repr(C)]` + `Pod` let it be byte-cast into the uniform buffer.
#[repr(C)]
#[derive(Clone, Copy, Zeroable, Pod, Debug)]
pub struct Uniforms {
    /// Camera as [x, y, zoom].
    pub camera: [f32; 3],
    // Presumably a darkening amount applied in the shader — confirm against shader.wgsl.
    pub darkness: f32
}

impl Default for Uniforms {
    /// Camera at the origin with zoom 1 and no darkness applied.
    fn default() -> Self {
        Uniforms {
            camera: [0.0, 0.0, 1.0],
            darkness: 0.0,
        }
    }
}
/// Owns all GPU-side state: the surface being drawn to, the logical
/// device/queue, the render pipeline, and the buffers shared with
/// `shader.wgsl`. The lifetime `'a` is the surface's lifetime.
pub struct Graphics<'a> {
// window: &'a Window,
/// Current surface configuration; width/height are rewritten on resize.
pub surface_config: SurfaceConfiguration,
surface: Surface<'a>,
device: Device,
render_pipeline: RenderPipeline,
queue: Queue,
/// Vertex data; created in `init` with extra zero-byte headroom so
/// `update` can rewrite it in place.
vertex_buf: Buffer,
/// u32 index data; same headroom scheme as `vertex_buf`.
index_buf: Buffer,
/// Backing buffer for `Uniforms`, bound at group 0 / binding 0.
uniforms_buf: Buffer,
uniforms_bind_group: BindGroup
}
impl<'a> Graphics<'a> {
/// Initializes the whole GPU stack for `window`: instance, surface,
/// adapter, device/queue, the vertex/index/uniform buffers seeded from
/// `state`, and the render pipeline built from `shader.wgsl`.
///
/// Panics if no suitable adapter/device is found or the surface cannot
/// be created — all of which are fatal at startup anyway.
pub async fn init(state: &State, window: Arc<Window>) -> Self {
    // Grab the framebuffer size before `window` is moved into the surface,
    // so the surface can be configured at its real size from the start
    // (the original configured at 1x1 and relied on a resize event).
    let size = window.inner_size();
    let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
        backends: wgpu::Backends::PRIMARY,
        dx12_shader_compiler: Default::default(),
        ..Default::default()
    });
    let surface = instance.create_surface(window).unwrap();
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::default(),
            force_fallback_adapter: false,
            // Request an adapter which can render to our surface
            compatible_surface: Some(&surface),
        })
        .await
        .expect("Failed to find an appropriate adapter");
    // Create the logical device and command queue
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::empty(),
                // Make sure we use the texture resolution limits from the adapter,
                // so we can support images the size of the swapchain.
                required_limits: wgpu::Limits::default()
                    .using_resolution(adapter.limits()),
                memory_hints: wgpu::MemoryHints::MemoryUsage,
            },
            None,
        )
        .await
        .expect("Failed to create device");
    // The 100 000 zero bytes of headroom let `update` write a growing mesh
    // back into the same buffer without reallocating every frame.
    let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Vertex Buffer"),
        contents: &[bytemuck::cast_slice::<Vertex, _>(&state.vertices), &[0; 100000]].concat(),
        usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
    });
    let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Index Buffer"),
        contents: &[bytemuck::cast_slice::<u32, _>(&state.indices), &[0; 100000]].concat(),
        usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
    });
    let uniforms_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Uniforms Buffer"),
        contents: bytemuck::cast_slice(&[state.uniforms]),
        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
    });
    let uniforms_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        entries: &[
            wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::VERTEX,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }
        ],
        label: Some("Uniforms Bind Group Layout"),
    });
    let uniforms_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout: &uniforms_bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: uniforms_buf.as_entire_binding(),
            }
        ],
        label: Some("Uniforms Bind Group"),
    });
    // Load the shaders from disk
    let shader = device.create_shader_module(include_wgsl!("shader.wgsl"));
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: None,
        bind_group_layouts: &[
            &uniforms_bind_group_layout
        ],
        push_constant_ranges: &[],
    });
    let swapchain_capabilities = surface.get_capabilities(&adapter);
    let swapchain_format = swapchain_capabilities.formats[0];
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: None,
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: "vs_main",
            buffers: &[
                Vertex::DESC
            ],
            compilation_options: Default::default(),
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_main",
            compilation_options: Default::default(),
            targets: &[Some(swapchain_format.into())],
        }),
        primitive: wgpu::PrimitiveState::default(),
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
        cache: None,
    });
    // Clamp to >= 1: wgpu rejects zero-sized surfaces (e.g. a minimized window).
    let surface_config = surface
        .get_default_config(&adapter, size.width.max(1), size.height.max(1))
        .unwrap();
    surface.configure(&device, &surface_config);
    Self {
        // window,
        surface_config,
        surface,
        device,
        render_pipeline,
        queue,
        vertex_buf,
        index_buf,
        uniforms_buf,
        uniforms_bind_group
    }
}
/// Handles window-level events that affect the render surface.
///
/// On `Resized`, reconfigures the surface to the new dimensions and then
/// requests a redraw — on macOS the window is not redrawn automatically
/// after a resize. All other window events are ignored.
pub fn window_event(&mut self, event: &WindowEvent, window: &Window) {
    if let WindowEvent::Resized(size) = event {
        self.surface_config.width = size.width;
        self.surface_config.height = size.height;
        self.surface.configure(&self.device, &self.surface_config);
        window.request_redraw();
    }
}
/// Handles top-level event-loop events.
///
/// Window events are forwarded to [`Self::window_event`] so the resize
/// handling lives in exactly one place (the original duplicated it here).
/// `AboutToWait` requests another redraw because `RedrawRequested` only
/// fires once unless it is manually re-requested.
pub fn event(&mut self, event: &Event<()>, window: &Window) {
    match event {
        // `window_event` reconfigures the surface on `Resized` and
        // ignores everything else, so forwarding all window events
        // preserves the original behavior.
        Event::WindowEvent { event, .. } => self.window_event(event, window),
        Event::AboutToWait => window.request_redraw(),
        _ => {}
    }
}
/// Renders one frame: clears the surface to black and draws the indexed
/// mesh currently held in the GPU buffers, using `state.indices.len()`
/// as the live index count.
///
/// Panics only on unrecoverable surface errors (timeout / out of memory);
/// a lost or outdated surface — which happens routinely during resizes —
/// is reconfigured and retried instead of crashing as the original did.
pub fn render(&self, state: &State) {
    let frame = match self.surface.get_current_texture() {
        Ok(frame) => frame,
        // Lost/Outdated are recoverable: reconfigure and try once more.
        Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
            self.surface.configure(&self.device, &self.surface_config);
            self.surface
                .get_current_texture()
                .expect("Failed to acquire next swap chain texture")
        }
        Err(e) => panic!("Failed to acquire next swap chain texture: {e:?}"),
    };
    let view = frame
        .texture
        .create_view(&wgpu::TextureViewDescriptor::default());
    let mut encoder = self
        .device
        .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    {
        // Inner scope: `rpass` borrows `encoder` and must be dropped
        // before `encoder.finish()`.
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: &view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        rpass.set_pipeline(&self.render_pipeline);
        rpass.set_bind_group(0, &self.uniforms_bind_group, &[]);
        rpass.set_vertex_buffer(0, self.vertex_buf.slice(..));
        rpass.set_index_buffer(self.index_buf.slice(..), wgpu::IndexFormat::Uint32);
        // Draw only the indices currently populated — the buffers carry
        // zero-padding headroom beyond this count.
        rpass.draw_indexed(0..state.indices.len() as u32, 0, 0..1);
    }
    self.queue.submit(Some(encoder.finish()));
    frame.present();
}
pub fn update(&mut self, state: &State) {
self.queue.write_buffer(&self.vertex_buf, 0, bytemuck::cast_slice(&state.vertices));
self.queue.write_buffer(&self.index_buf, 0, bytemuck::cast_slice(&state.indices));
self.queue.write_buffer(&self.uniforms_buf, 0, bytemuck::cast_slice(&[state.uniforms]));
}
}