diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..b562172
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+name = "agpu"
+version = "0.1.0"
+edition = "2018"
+resolver = "2"
+description = "Abstract GPU Project"
+homepage = "https://github.com/lyricwulf/agpu"
+repository = "https://github.com/lyricwulf/agpu"
+keywords = ["gpu", "graphics", "compute"]
+license = "MIT"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+wgpu = { version = "0.11", features = ["spirv"] }
+futures = "0.3"
+bytemuck = "1.7"
+num-traits = "0.2"
+raw-window-handle = "0.3"
+tracing = "0.1"
+
+[dependencies.winit]
+version = "0.25"
+optional = true
+
+[dependencies.egui]
+optional = true
+version = "0.15"
+features = ["convert_bytemuck"]
+
+[dependencies.egui-winit]
+version = "0.15"
+optional = true
+
+[features]
+profiler = []
+default = ["profiler", "egui", "winit", "egui-winit"]
+
+[dev-dependencies]
+# Used in example
+tracing-subscriber = "0.3"
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..08c817d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,56 @@
+# agpu
+
+Abstract GPU Project
+
+`agpu` is an abstraction library over [wgpu](https://github.com/gfx-rs/wgpu), with the goal of providing a GPU framework for both small applications and large engines alike, with minimal boilerplate and maximum readability.
+
+## Quick Start
+To get started with a program that renders to the screen:
+```rust
+fn main() -> Result<(), agpu::BoxError> {
+ let program = agpu::GpuProgram::builder().build()?;
+
+ let example_pipeline = program.gpu.create_pipeline().build();
+
+ program.run_draw(move |mut frame| {
+ frame
+ .render_pass("Example render pass")
+ .with_pipeline(&example_pipeline)
+ .begin()
+ .draw_triangle();
+ })
+}
+```
+More examples are available in the examples folder.
+
+## Goals
+- The easiest GPU library
+- No loss of API functionality for underlying libraries
+- Zero (ideal) runtime cost
+
+### Non-goals
+- Managed rendering engine
+- Adhering strictly to WebGPU standard
+
+## State
+`agpu` is in a very early stage of development. It strives to be as stable as the underlying wgpu library, but some features will be incomplete or missing.
+
+The current goal is to replicate all wgpu examples using minimal code.
+
+## Style
+A builder-style API is used:
+- Avoids boilerplate and struct hell
+- Allows the user to opt in to functionality
+- With sensible defaults, default constructors are one-liners
+
+[`Deref`](https://doc.rust-lang.org/std/ops/trait.Deref.html) is **abused**([?](https://rust-unofficial.github.io/patterns/anti_patterns/deref.html)) to add redundant/convenience functions to wgpu types. This is currently preferred over extension traits that add functions to the underlying types, since it avoids having to import traits that are never used directly.
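+
+For example, `agpu` wrapper types deref to their wgpu counterparts, so the underlying wgpu API stays available on them. A minimal sketch (assuming a `gpu: GpuHandle` has already been built):
+
+```rust
+let buffer = gpu
+ .create_buffer("example buffer")
+ .with_size(256)
+ .as_uniform_buffer()
+ .allow_copy_to()
+ .build();
+
+// `agpu::Buffer` derefs to `wgpu::Buffer`, so wgpu's own methods work directly:
+let binding = buffer.as_entire_buffer_binding();
+```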
+
+## Integrations
+
+Some integrations are provided as default features to this crate:
+- [`winit`](https://github.com/rust-windowing/winit) for windowing (WIP)
+- [`egui`](https://github.com/emilk/egui) for GUI (WIP)
+
+You can (*not yet!*) disable them by opting out of the default features, and you can also build your own integration on top of this library.
+
diff --git a/examples/egui.rs b/examples/egui.rs
new file mode 100644
index 0000000..078434b
--- /dev/null
+++ b/examples/egui.rs
@@ -0,0 +1,408 @@
+// ! Usage will be greatly improved in the future...
+
+use std::mem::size_of;
+use std::time::{Duration, Instant};
+
+use agpu::prelude::*;
+use egui::plot::{Line, Plot, Value, Values};
+use wgpu::util::DeviceExt;
+use winit::event::{Event, WindowEvent};
+use winit::event_loop::{ControlFlow, EventLoop};
+use winit::window::WindowBuilder;
+
+use egui::epaint;
+
+fn main() {
+ tracing_subscriber::fmt::init();
+
+ let framerate = 60.0;
+ // Initialize winit
+ let event_loop = EventLoop::new();
+ let window = WindowBuilder::new().build(&event_loop).unwrap();
+
+ let mut state = egui_winit::State::new(&window);
+
+ // Initialize the gpu
+ let gpu = Gpu::builder()
+ .with_label("Example Gpu Handle")
+ .with_backends(wgpu::Backends::VULKAN)
+ .with_profiler()
+ .build(&window)
+ .unwrap();
+
+ // Create the viewport
+ let viewport = gpu.create_viewport(window).build();
+
+ let mut last_update_inst = Instant::now();
+
+ let pipeline = gpu
+ .create_pipeline()
+ .with_fragment(include_bytes!("shader/dog.frag.spv"))
+ .with_bind_groups(&[])
+ .build();
+
+ let mut egui_ctx = egui::CtxRef::default();
+ // let mut stat_counts = [0; 5];
+ let mut timestamps: Vec<(String, f32)> = vec![];
+
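+ // egui vertex layout: 2 f32 position + 2 f32 uv + 4 unorm8 color = 20 bytes per vertex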
+ let vertex_layout = wgpu::VertexBufferLayout {
+ step_mode: wgpu::VertexStepMode::Vertex,
+ attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2, 2 => Unorm8x4],
+ array_stride: (2 + 2 + 1) * 4,
+ };
+
+ let mut vertex_buffers = Vec::<(Buffer, Buffer)>::new();
+
+ egui_ctx.begin_frame(Default::default());
+ let egui_font_texture = egui_ctx.texture();
+
+ let sampler = gpu.device.create_sampler(&wgpu::SamplerDescriptor {
+ label: Some("UI sampler"),
+ mag_filter: wgpu::FilterMode::Linear,
+ min_filter: wgpu::FilterMode::Linear,
+ mipmap_filter: wgpu::FilterMode::Linear,
+ ..Default::default()
+ });
+
+ // font data
+ // we need to convert the texture into rgba_srgb format
+ let mut pixels: Vec<u8> = Vec::with_capacity(egui_font_texture.pixels.len() * 4);
+ for srgba in egui_font_texture.srgba_pixels(0.33) {
+ pixels.push(srgba.r());
+ pixels.push(srgba.g());
+ pixels.push(srgba.b());
+ pixels.push(srgba.a());
+ }
+
+ let font_texture = gpu.device.create_texture_with_data(
+ &gpu.queue,
+ &wgpu::TextureDescriptor {
+ label: Some("EGUI font texture"),
+ size: wgpu::Extent3d {
+ width: egui_font_texture.width as u32,
+ height: egui_font_texture.height as u32,
+ depth_or_array_layers: 1,
+ },
+ mip_level_count: 1,
+ sample_count: 1,
+ dimension: wgpu::TextureDimension::D2,
+ format: wgpu::TextureFormat::Rgba8UnormSrgb,
+ usage: wgpu::TextureUsages::TEXTURE_BINDING,
+ },
+ &pixels,
+ );
+ let font_texture_view = font_texture.create_view(&wgpu::TextureViewDescriptor {
+ ..Default::default()
+ });
+
+ let bind_group_layout = gpu
+ .device
+ .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
+ label: Some("UI bind group"),
+ entries: &[
+ wgpu::BindGroupLayoutEntry {
+ binding: 0,
+ visibility: wgpu::ShaderStages::VERTEX,
+ ty: wgpu::BindingType::Buffer {
+ ty: wgpu::BufferBindingType::Uniform,
+ has_dynamic_offset: false,
+ min_binding_size: None,
+ },
+ count: None,
+ },
+ wgpu::BindGroupLayoutEntry {
+ binding: 1,
+ visibility: wgpu::ShaderStages::FRAGMENT,
+ ty: wgpu::BindingType::Texture {
+ sample_type: wgpu::TextureSampleType::Float { filterable: true },
+ view_dimension: wgpu::TextureViewDimension::D2,
+ multisampled: false,
+ },
+ count: None,
+ },
+ wgpu::BindGroupLayoutEntry {
+ binding: 2,
+ visibility: wgpu::ShaderStages::FRAGMENT,
+ ty: wgpu::BindingType::Sampler {
+ comparison: false,
+ filtering: true,
+ },
+ count: None,
+ },
+ ],
+ });
+ let ui_bind_group = gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
+ label: Some("UI bind group"),
+ layout: &bind_group_layout,
+ entries: &[
+ wgpu::BindGroupEntry {
+ binding: 0,
+ resource: wgpu::BindingResource::Buffer(
+ viewport.data_buffer.as_entire_buffer_binding(),
+ ),
+ },
+ wgpu::BindGroupEntry {
+ binding: 1,
+ resource: wgpu::BindingResource::TextureView(&font_texture_view),
+ },
+ wgpu::BindGroupEntry {
+ binding: 2,
+ resource: wgpu::BindingResource::Sampler(&sampler),
+ },
+ ],
+ });
+
+ let ui_pipeline = gpu
+ .create_pipeline()
+ .with_vertex_layouts(&[vertex_layout])
+ .with_fragment(include_bytes!("shader/egui.frag.spv"))
+ .with_vertex(include_bytes!("shader/egui.vert.spv"))
+ .with_bind_groups(&[&bind_group_layout])
+ .build();
+
+ // Start the event loop
+ event_loop.run(move |event, _, control_flow| {
+ // Reset control flow
+ *control_flow = ControlFlow::Poll;
+
+ match event {
+ Event::WindowEvent { event, .. } => {
+ if state.on_event(&egui_ctx, &event) {
+ return;
+ };
+
+ match event {
+ WindowEvent::Resized(_) => {
+ // viewport.resize(new_size.width, new_size.height);
+ }
+ WindowEvent::CloseRequested => {
+ *control_flow = ControlFlow::Exit;
+ }
+ _ => {}
+ }
+ }
+ Event::MainEventsCleared => {
+ viewport.window.request_redraw();
+ }
+ Event::RedrawRequested(_) => {
+ // UI logic
+ egui_ctx.begin_frame(state.take_egui_input(&viewport.window));
+ egui::CentralPanel::default()
+ .frame(egui::Frame {
+ margin: egui::vec2(100.0, 100.0),
+ ..egui::Frame::none()
+ })
+ .show(&egui_ctx, |ui| {
+ egui::SidePanel::left("dog")
+ .frame(egui::Frame {
+ margin: egui::vec2(40.0, 40.0),
+ corner_radius: 4.0,
+ fill: egui::Color32::from_rgb(4, 4, 4),
+ ..egui::Frame::none()
+ })
+ .show_inside(ui, |ui| {
+ egui::Grid::new("stats grid").show(ui, |ui| {
+ // // Shows stat counts
+ // FIXME: Stat counts are broken and cause gpu to hang
+ // for (count, label) in stat_counts
+ // .iter()
+ // .zip(thermite_core::PIPELINE_STATISTICS_LABELS)
+ // {
+ // ui.label(label);
+ // ui.label(count);
+ // ui.end_row();
+ // }
+
+ for (label, value) in &timestamps {
+ ui.label(label);
+ ui.label(value.to_string() + " ms");
+ ui.end_row();
+ }
+ });
+ ui.add(egui::Label::new("Hello World!"));
+ ui.label("A shorter and more convenient way to add a label.");
+ if ui.button("Click me").clicked() { /* take some action here */ }
+ })
+ });
+
+ // Window
+ egui::Window::new("dog")
+ .resizable(true)
+ .show(&egui_ctx, |ui| {
+ ui.add(egui::Label::new("Hello World!"));
+ ui.heading("im a dog");
+ ui.label("A shorter and more convenient way to add a label.");
+ if ui.button("Click me").clicked() { /* take some action here */ }
+
+ let sin = (0..1000).map(|i| {
+ let x = i as f64 * 0.01;
+ Value::new(x, x.sin())
+ });
+ let line = Line::new(Values::from_values_iter(sin));
+
+ // performance plotter
+ ui.add(Plot::new("Performance plot").line(line));
+ });
+
+ let (output, cl_sh) = egui_ctx.end_frame();
+
+ state.handle_output(&viewport.window, &egui_ctx, output);
+
+ let cl_me = egui_ctx.tessellate(cl_sh);
+
+ // update ui buffers
+ for (i, egui::ClippedMesh(_, mesh)) in cl_me.iter().enumerate() {
+ // create any missing buffers
+ if i >= vertex_buffers.len() {
+ vertex_buffers.push((
+ gpu.create_buffer("")
+ .with_data_slice(&mesh.vertices)
+ .as_vertex_buffer()
+ .allow_copy_to()
+ .build(),
+ gpu.create_buffer("")
+ .with_data_slice(&mesh.indices)
+ .as_index_buffer()
+ .allow_copy_to()
+ .build(),
+ ));
+ } else {
+ // resize buffer if needed
+ if size_of::<epaint::Vertex>() * mesh.vertices.len()
+ > vertex_buffers[i].0.size()
+ {
+ vertex_buffers[i].0 = gpu
+ .create_buffer("")
+ .with_data_slice(&mesh.vertices)
+ .as_vertex_buffer()
+ .allow_copy_to()
+ .build();
+ } else {
+ gpu.queue.write_buffer(
+ &vertex_buffers[i].0,
+ 0,
+ bytemuck::cast_slice(&mesh.vertices),
+ );
+ };
+
+ if size_of::<u32>() * mesh.indices.len() > vertex_buffers[i].1.size() {
+ vertex_buffers[i].1 = gpu
+ .create_buffer("")
+ .with_data_slice(&mesh.indices)
+ .as_index_buffer()
+ .allow_copy_to()
+ .build();
+ } else {
+ gpu.queue.write_buffer(
+ &vertex_buffers[i].1,
+ 0,
+ bytemuck::cast_slice(&mesh.indices),
+ );
+ }
+ }
+ }
+ // Submit buffer updates
+ gpu.queue.submit(None);
+
+ // Render gpu
+ let mut frame = viewport.begin_frame().unwrap();
+
+ // FIXME: ERROR Vulkan validation error, VK_IMAGE_LAYOUT_UNDEFINED
+ // * This ERROR only happens in our example and not when used in our project
+
+ // gpu.profiler.timestamp("begin", &mut encoder);
+ // let a = viewport.depth_view.borrow();
+ {
+ // Begin render pass
+ let mut render_pass = frame.render_pass("example pass").begin();
+
+ // Draw the scene
+ render_pass.set_pipeline(&pipeline);
+ render_pass.draw(0..3, 0..1);
+ }
+
+ {
+ let mut ui_pass = frame.render_pass("UI Render Pass").begin();
+
+ ui_pass.set_pipeline(&ui_pipeline);
+ ui_pass.set_bind_group(0, &ui_bind_group, &[]);
+
+ for (egui::ClippedMesh(clip, me), (vb, ib)) in cl_me.iter().zip(&vertex_buffers)
+ {
+ if let Some((x, y, width, height)) = render_region(clip, &viewport) {
+ ui_pass.set_scissor_rect(x, y, width, height);
+ ui_pass.set_vertex_buffer(0, vb.slice(..));
+ ui_pass.set_index_buffer(ib.slice(..), wgpu::IndexFormat::Uint32);
+ ui_pass.draw_indexed(0..me.indices.len() as u32, 0, 0..1);
+ }
+ }
+ }
+
+ // if let Ok(stats) = gpu.total_statistics() {
+ // stat_counts = stats;
+ // };
+ timestamps = gpu.timestamp_report();
+ }
+
+ Event::RedrawEventsCleared => {
+ if let Some(instant) = next_frame_time(framerate, &mut last_update_inst) {
+ *control_flow = ControlFlow::WaitUntil(instant);
+ } else {
+ viewport.request_redraw();
+ }
+ }
+ _ => {}
+ }
+ });
+}
+
+fn next_frame_time(framerate: f32, last_update_inst: &mut Instant) -> Option<Instant> {
+ // Clamp to some max framerate to avoid busy-looping too much (we might be in
+ // wgpu::PresentMode::Mailbox, thus discarding superfluous frames)
+ let target_frametime = Duration::from_secs_f32(1.0 / framerate);
+ let time_since_last_frame = last_update_inst.elapsed();
+
+ if time_since_last_frame >= target_frametime {
+ *last_update_inst = Instant::now();
+ None
+ } else {
+ Some(Instant::now() + target_frametime - time_since_last_frame)
+ }
+}
+
+/// Adapted from <https://github.com/hasenbanck/egui_wgpu_backend/blob/master/src/lib.rs>
+fn render_region(clip_rect: &egui::Rect, viewport: &Viewport) -> Option<(u32, u32, u32, u32)> {
+ let scale_factor = 1.0;
+ // Transform clip rect to physical pixels.
+ let clip_min_x = scale_factor * clip_rect.min.x;
+ let clip_min_y = scale_factor * clip_rect.min.y;
+ let clip_max_x = scale_factor * clip_rect.max.x;
+ let clip_max_y = scale_factor * clip_rect.max.y;
+
+ // Make sure clip rect can fit within an `u32`.
+ let clip_min_x = clip_min_x.clamp(0.0, viewport.width() as f32);
+ let clip_min_y = clip_min_y.clamp(0.0, viewport.height() as f32);
+ let clip_max_x = clip_max_x.clamp(clip_min_x, viewport.width() as f32);
+ let clip_max_y = clip_max_y.clamp(clip_min_y, viewport.height() as f32);
+
+ let clip_min_x = clip_min_x.round() as u32;
+ let clip_min_y = clip_min_y.round() as u32;
+ let clip_max_x = clip_max_x.round() as u32;
+ let clip_max_y = clip_max_y.round() as u32;
+
+ let width = (clip_max_x - clip_min_x).max(1);
+ let height = (clip_max_y - clip_min_y).max(1);
+
+ // Clip scissor rectangle to target size.
+ let x = clip_min_x.min(viewport.width());
+ let y = clip_min_y.min(viewport.height());
+ let width = width.min(viewport.width() - x);
+ let height = height.min(viewport.height() - y);
+
+ // Skip rendering with zero-sized clip areas.
+ if width == 0 || height == 0 {
+ return None;
+ }
+
+ Some((x, y, width, height))
+}
diff --git a/examples/manual.rs b/examples/manual.rs
new file mode 100644
index 0000000..2aeea2c
--- /dev/null
+++ b/examples/manual.rs
@@ -0,0 +1,48 @@
+use agpu::prelude::*;
+use winit::{
+ event::{Event, WindowEvent},
+ event_loop::{ControlFlow, EventLoop},
+ window::WindowBuilder,
+};
+
+fn main() -> Result<(), agpu::BoxError> {
+ tracing_subscriber::fmt::init();
+
+ // Initialize winit
+ let event_loop = EventLoop::new();
+ let window = WindowBuilder::new().build(&event_loop).unwrap();
+
+ let gpu = Gpu::builder().build(&window)?;
+
+ let viewport = gpu.create_viewport(window).build();
+
+ let pipeline = gpu.create_pipeline().build();
+
+ event_loop.run(move |event, _, control_flow| match event {
+ Event::RedrawRequested(_) => {
+ let mut frame = match viewport.begin_frame() {
+ Ok(frame) => frame,
+ Err(err) => {
+ tracing::error!("{}", err);
+ return;
+ }
+ };
+
+ {
+ let mut rpass = frame.render_pass("Base render pass").begin();
+ rpass.set_pipeline(&pipeline);
+ rpass.draw(0..3, 0..1);
+ }
+ }
+ Event::MainEventsCleared => {
+ viewport.window.request_redraw();
+ }
+ Event::WindowEvent {
+ event: WindowEvent::CloseRequested,
+ ..
+ } => {
+ *control_flow = ControlFlow::Exit;
+ }
+ _ => {}
+ });
+}
diff --git a/examples/shader/dog.frag.spv b/examples/shader/dog.frag.spv
new file mode 100644
index 0000000..f8dd964
Binary files /dev/null and b/examples/shader/dog.frag.spv differ
diff --git a/examples/shader/egui.frag b/examples/shader/egui.frag
new file mode 100644
index 0000000..0251212
--- /dev/null
+++ b/examples/shader/egui.frag
@@ -0,0 +1,14 @@
+#version 460
+
+layout (location = 0) in vec2 in_pos;
+layout (location = 1) in vec2 in_uv;
+layout (location = 2) in vec4 in_color;
+
+layout (set = 0, binding = 1) uniform texture2D tex;
+layout (set = 0, binding = 2) uniform sampler s;
+
+layout (location = 0) out vec4 out_color;
+
+void main() {
+ out_color = in_color * texture(sampler2D(tex, s), in_uv);
+}
diff --git a/examples/shader/egui.frag.spv b/examples/shader/egui.frag.spv
new file mode 100644
index 0000000..b5977a8
Binary files /dev/null and b/examples/shader/egui.frag.spv differ
diff --git a/examples/shader/egui.vert b/examples/shader/egui.vert
new file mode 100644
index 0000000..f157b68
--- /dev/null
+++ b/examples/shader/egui.vert
@@ -0,0 +1,24 @@
+//! Vertex shader that simply passes the gl_Position from the given vertex
+
+#version 460
+
+layout (location = 0) in vec2 in_pos;
+layout (location = 1) in vec2 in_uv;
+layout (location = 2) in vec4 in_color;
+
+layout (set = 0, binding = 0) uniform Viewport {
+ vec2 size;
+} viewport;
+
+layout (location = 0) out vec2 out_pos;
+layout (location = 1) out vec2 out_uv;
+layout (location = 2) out vec4 out_color;
+
+void main() {
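+ // egui supplies positions in logical points with the origin at the top-left;
+ // map them to normalized device coordinates and flip Y for the GPU convention.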
+ vec2 xy = (in_pos / viewport.size) * 2 - 1;
+ gl_Position = vec4(xy, 0, 1);
+ gl_Position.y = - gl_Position.y;
+ out_pos = in_pos;
+ out_uv = in_uv;
+ out_color = in_color;
+}
diff --git a/examples/shader/egui.vert.spv b/examples/shader/egui.vert.spv
new file mode 100644
index 0000000..f2685a0
Binary files /dev/null and b/examples/shader/egui.vert.spv differ
diff --git a/examples/shader/hello-triangle.wgsl b/examples/shader/hello-triangle.wgsl
new file mode 100644
index 0000000..4903d72
--- /dev/null
+++ b/examples/shader/hello-triangle.wgsl
@@ -0,0 +1,11 @@
+[[stage(vertex)]]
+fn vs_main([[builtin(vertex_index)]] in_vertex_index: u32) -> [[builtin(position)]] vec4<f32> {
+ let x = f32(i32(in_vertex_index) - 1);
+ let y = f32(i32(in_vertex_index & 1u) * 2 - 1);
+ return vec4<f32>(x, y, 0.0, 1.0);
+}
+
+[[stage(fragment)]]
+fn fs_main() -> [[location(0)]] vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+}
\ No newline at end of file
diff --git a/examples/wgpu-hello-triangle.rs b/examples/wgpu-hello-triangle.rs
new file mode 100644
index 0000000..c2673bb
--- /dev/null
+++ b/examples/wgpu-hello-triangle.rs
@@ -0,0 +1,20 @@
+const GREEN: u32 = 0x00FF00FF;
+
+fn main() -> Result<(), agpu::BoxError> {
+ let program = agpu::GpuProgram::builder().build()?;
+
+ let example_pipeline = program
+ .gpu
+ .create_pipeline()
+ .with_vertex_fragment(include_bytes!("shader/hello-triangle.wgsl"))
+ .build();
+
+ program.run_draw(move |mut frame| {
+ frame
+ .render_pass("Example render pass")
+ .with_pipeline(&example_pipeline)
+ .clear_color(GREEN)
+ .begin()
+ .draw_triangle();
+ })
+}
diff --git a/examples/wgpu-hello.rs b/examples/wgpu-hello.rs
new file mode 100644
index 0000000..a550b25
--- /dev/null
+++ b/examples/wgpu-hello.rs
@@ -0,0 +1,13 @@
+fn main() -> Result<(), agpu::BoxError> {
+ let program = agpu::GpuProgram::builder().build()?;
+
+ let example_pipeline = program.gpu.create_pipeline().build();
+
+ program.run_draw(move |mut frame| {
+ frame
+ .render_pass("Example render pass")
+ .with_pipeline(&example_pipeline)
+ .begin()
+ .draw_triangle();
+ })
+}
diff --git a/src/graphics.rs b/src/graphics.rs
new file mode 100644
index 0000000..be46053
--- /dev/null
+++ b/src/graphics.rs
@@ -0,0 +1,23 @@
+/// Error types for core
+pub mod error;
+pub use error::*;
+
+/// Gpu abstraction
+pub mod gpu;
+pub use gpu::*;
+
+/// Need this for rendering to screen!
+pub mod viewport;
+pub use viewport::*;
+
+pub mod buffer;
+pub use buffer::*;
+
+pub mod profiler;
+pub use profiler::*;
+
+pub mod pipeline;
+pub use pipeline::*;
+
+pub mod texture;
+pub use texture::*;
diff --git a/src/graphics/buffer.rs b/src/graphics/buffer.rs
new file mode 100644
index 0000000..7d60331
--- /dev/null
+++ b/src/graphics/buffer.rs
@@ -0,0 +1,40 @@
+mod builder;
+pub use builder::*;
+
+mod view;
+use futures::executor::block_on;
+pub use view::*;
+
+use crate::GpuHandle;
+use std::ops::Deref;
+
+/// * Probably best used as `RefCell`
+pub struct Buffer {
+ pub(crate) gpu: GpuHandle,
+ pub(crate) inner: wgpu::Buffer,
+ pub size: u64,
+}
+/// Allows you to use this as a reference to the inner `wgpu::Buffer`
+impl Deref for Buffer {
+ type Target = wgpu::Buffer;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+impl Buffer {
+ /// # Errors
+ /// Errors according to [`wgpu::BufferAsyncError`]
+ pub fn download(&self) -> Result<wgpu::util::DownloadBuffer, wgpu::BufferAsyncError> {
+ let fut = wgpu::util::DownloadBuffer::read_buffer(
+ &self.gpu.device,
+ &self.gpu.queue,
+ &self.inner.slice(..),
+ );
+ block_on(fut)
+ }
+
+ pub fn size(&self) -> usize {
+ self.size as usize
+ }
+}
diff --git a/src/graphics/buffer/builder.rs b/src/graphics/buffer/builder.rs
new file mode 100644
index 0000000..c0bffe8
--- /dev/null
+++ b/src/graphics/buffer/builder.rs
@@ -0,0 +1,193 @@
+use bytemuck::Pod;
+use wgpu::util::DeviceExt;
+
+use tracing::warn;
+
+use crate::{Buffer, GpuHandle};
+
+pub enum BufferInitContent<'a> {
+ /// The buffer will be initialized with the given data
+ Data(&'a [u8]),
+ /// The buffer will be initialized with the given size
+ Size(u64),
+}
+
+pub struct BufferBuilder<'a> {
+ pub gpu: GpuHandle,
+ pub label: Option<&'a str>,
+ pub content: BufferInitContent<'a>,
+ pub usage: wgpu::BufferUsages,
+}
+impl<'a> BufferBuilder<'a> {
+ #[must_use]
+ pub const fn new(gpu: GpuHandle, label: &'a str) -> Self {
+ BufferBuilder {
+ gpu,
+ label: Some(label),
+ content: BufferInitContent::Size(0),
+ usage: wgpu::BufferUsages::empty(),
+ }
+ }
+
+ /// Set a label that GPU debuggers can display
+ pub fn with_label(mut self, label: &'a str) -> Self {
+ self.label = Some(label);
+ self
+ }
+
+ /// The buffer will be initialized with this size
+ /// Mutually exclusive to `with_data`
+ pub fn with_size(mut self, size: u64) -> Self {
+ self.content = BufferInitContent::Size(size);
+ self
+ }
+
+ /// The buffer will be initialized with this content
+ /// Mutually exclusive with `with_size`
+ /// See also [`with_data_slice`]
+ pub fn with_data<T>(mut self, data: &'a T) -> Self
+ where
+ T: Pod,
+ {
+ self.content = BufferInitContent::Data(bytemuck::bytes_of(data));
+ self
+ }
+
+ /// The buffer will be initialized with the contents of the given slice
+ /// Mutually exclusive with `with_size`
+ /// See also [`with_data`]
+ pub fn with_data_slice<T>(mut self, data: &'a [T]) -> Self
+ where
+ T: Pod,
+ {
+ self.content = BufferInitContent::Data(bytemuck::cast_slice(data));
+ self
+ }
+
+ /// Allow a buffer to be the index buffer in a draw operation.
+ pub fn as_index_buffer(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::INDEX;
+ self
+ }
+
+ /// Allow a buffer to be the vertex buffer in a draw operation.
+ pub fn as_vertex_buffer(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::VERTEX;
+ self
+ }
+
+ /// Allow a buffer to be a `BufferBindingType::Uniform` inside a bind group.
+ pub fn as_uniform_buffer(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::UNIFORM;
+ self
+ }
+
+ /// Allow a buffer to be a `BufferBindingType::Storage` inside a bind group.
+ pub fn as_storage_buffer(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::STORAGE;
+ self
+ }
+
+ /// Allow a buffer to be the indirect buffer in an indirect draw call.
+ pub fn as_indirect_buffer(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::INDIRECT;
+ self
+ }
+
+ /// See [`wgpu::BufferUsages::MAP_READ`]
+ pub fn allow_map_read(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::MAP_READ;
+ self
+ }
+
+ /// See [`wgpu::BufferUsages::MAP_WRITE`]
+ pub fn allow_map_write(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::MAP_WRITE;
+ self
+ }
+
+ /// See [`wgpu::BufferUsages::COPY_DST`]
+ pub fn allow_copy_to(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::COPY_DST;
+ self
+ }
+
+ /// See [`wgpu::BufferUsages::COPY_SRC`]
+ pub fn allow_copy_from(mut self) -> Self {
+ self.usage |= wgpu::BufferUsages::COPY_SRC;
+ self
+ }
+
+ /// Sets the usage of the buffer
+ /// See also `add_usage` and `rm_usage`
+ pub fn with_usage(mut self, usage: wgpu::BufferUsages) -> Self {
+ self.usage = usage;
+ self
+ }
+ /// Adds the usage flag to the buffer
+ /// See also `with_usage` and `rm_usage`
+ pub fn add_usage(mut self, usage: wgpu::BufferUsages) -> Self {
+ self.usage |= usage;
+ self
+ }
+ /// Removes the usage flag from the buffer
+ /// See also `with_usage` and `add_usage`
+ pub fn rm_usage(mut self, usage: wgpu::BufferUsages) -> Self {
+ self.usage &= !usage;
+ self
+ }
+
+ // This is used by build() and build_and_map() for our convenience
+ fn build_impl(&self, mapped_at_creation: bool) -> (wgpu::Buffer, u64) {
+ match self.content {
+ BufferInitContent::Data(data) => {
+ if mapped_at_creation {
+ warn!("mapped a buffer on creation, but it is already being initialized with data");
+ }
+ (
+ self.gpu
+ .device
+ .create_buffer_init(&wgpu::util::BufferInitDescriptor {
+ label: self.label,
+ usage: self.usage,
+ contents: data,
+ }),
+ data.len() as u64,
+ )
+ }
+ BufferInitContent::Size(size) => (
+ self.gpu.device.create_buffer(&wgpu::BufferDescriptor {
+ label: self.label,
+ size,
+ usage: self.usage,
+ mapped_at_creation,
+ }),
+ size,
+ ),
+ }
+ }
+
+ /// Creates the buffer
+ #[must_use]
+ pub fn build(&self) -> Buffer {
+ let (inner, size) = self.build_impl(false);
+
+ Buffer {
+ inner,
+ gpu: self.gpu.clone(),
+ size,
+ }
+ }
+
+ /// Allows the buffer to be mapped immediately after it is created.
+ #[must_use]
+ pub fn build_and_map(&self) -> Buffer {
+ let (inner, size) = self.build_impl(true);
+
+ Buffer {
+ inner,
+ gpu: self.gpu.clone(),
+ size,
+ }
+ }
+}
diff --git a/src/graphics/buffer/view.rs b/src/graphics/buffer/view.rs
new file mode 100644
index 0000000..361c342
--- /dev/null
+++ b/src/graphics/buffer/view.rs
@@ -0,0 +1,37 @@
+use std::ops::Deref;
+
+/// Utility struct for creating a `BufferView` that auto unmaps when dropped.
+pub struct ScopedBufferView<'a> {
+ /// The inner buffer view.
+ /// This is an `Option` because it must be dropped before the buffer can be
+ /// unmapped; it only becomes `None` when the `ScopedBufferView` is dropped.
+ buffer_view: Option<wgpu::BufferView<'a>>,
+ /// Reference to the Buffer so we can unmap it when the view is dropped.
+ buffer: &'a wgpu::Buffer,
+}
+impl<'a> ScopedBufferView<'a> {
+ pub fn new(
+ buffer: &'a wgpu::Buffer,
+ buffer_view: wgpu::BufferView<'a>,
+ ) -> ScopedBufferView<'a> {
+ // Wrap in Option
+ let buffer_view = Some(buffer_view);
+ ScopedBufferView {
+ buffer_view,
+ buffer,
+ }
+ }
+}
+impl<'a> Deref for ScopedBufferView<'a> {
+ type Target = wgpu::BufferView<'a>;
+ fn deref(&self) -> &Self::Target {
+ // We can always unwrap this
+ self.buffer_view.as_ref().unwrap()
+ }
+}
+impl Drop for ScopedBufferView<'_> {
+ fn drop(&mut self) {
+ self.buffer_view = None;
+ self.buffer.unmap();
+ }
+}
diff --git a/src/graphics/error.rs b/src/graphics/error.rs
new file mode 100644
index 0000000..dd20f08
--- /dev/null
+++ b/src/graphics/error.rs
@@ -0,0 +1,21 @@
+#[non_exhaustive]
+#[derive(Debug)]
+pub enum GpuError {
+ AdapterNone,
+ ShaderParseError,
+ RequestDeviceError(wgpu::RequestDeviceError),
+ DisplayNone,
+ SurfaceError(wgpu::SurfaceError),
+ BufferAsyncError,
+ QueryNone,
+}
+impl std::fmt::Display for GpuError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
+ write!(f, "{:?}", self)
+ }
+}
+impl std::error::Error for GpuError {}
+
+/// Generic error type for any error.
+/// Recommended to use with terminal errors only, which are expected to be displayed and not handled.
+pub type BoxError = Box<dyn std::error::Error>;
diff --git a/src/graphics/gpu.rs b/src/graphics/gpu.rs
new file mode 100644
index 0000000..60b16ac
--- /dev/null
+++ b/src/graphics/gpu.rs
@@ -0,0 +1,112 @@
+mod builder;
+pub use builder::GpuBuilder;
+
+pub use wgpu::Backends;
+
+use crate::{BufferBuilder, GpuError, Profiler, ViewportBuilder};
+use std::{ops::Deref, rc::Rc};
+use winit::window::Window;
+
+/// The HW GPU context which contains all wgpu context info.
+/// This is meant as an easier and more ergonomic way to pass around wgpu info.
+/// You can manually construct this with fields but it is recommended to use the [builder].
+///
+/// [builder]: Gpu::builder()
+pub struct Gpu {
+ /// This is the instance for wgpu itself. We shouldn't need more than 1 in the
+ /// life of a program.
+ pub instance: wgpu::Instance,
+ /// This is the adapter, representing the physical device.
+ pub adapter: wgpu::Adapter,
+ pub device: wgpu::Device,
+ pub queue: wgpu::Queue,
+ pub profiler: Profiler,
+}
+impl Gpu {
+ /// An alias for `GpuBuilder::new()`
+ #[must_use]
+ pub fn builder<'a>() -> GpuBuilder<'a> {
+ GpuBuilder::default()
+ }
+
+ /// Converts the Gpu into a `GpuHandle` which can be passed around by clone
+ #[must_use]
+ pub fn to_handle(self) -> GpuHandle {
+ GpuHandle {
+ context: Rc::new(self),
+ }
+ }
+}
+
+/// A struct that wraps the [`Gpu`] in an `Rc` so it can be passed around by clone.
+/// Because this is an `Rc`, it will automatically be freed when there are no
+/// more references to it. It follows that any struct holding a `GpuHandle` is
+/// always guaranteed a valid reference to the `Gpu`.
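+///
+/// Illustrative sketch (assuming `gpu` is an existing `GpuHandle`):
+/// ```ignore
+/// let gpu2 = gpu.clone(); // cheap: clones the Rc, not the GPU context
+/// drop(gpu); // the underlying Gpu lives until the last handle is dropped
+/// ```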
+#[derive(Clone)]
+pub struct GpuHandle {
+ context: Rc<Gpu>,
+}
+impl GpuHandle {
+ /// Create a Viewport for displaying to the given window.
+ // Lifetime `a`: The reference Gpu and Window must outlive ViewportBuilder
+ #[must_use]
+ pub fn create_viewport(&self, window: Window) -> ViewportBuilder {
+ ViewportBuilder::new(self.clone(), window)
+ }
+
+ #[must_use]
+ pub fn create_buffer<'a>(&self, label: &'a str) -> BufferBuilder<'a> {
+ BufferBuilder::new(self.clone(), label)
+ }
+
+ #[must_use]
+ pub fn create_pipeline<'a>(&self) -> crate::pipeline::PipelineBuilder<'a> {
+ crate::pipeline::PipelineBuilder::new(self.clone())
+ }
+
+ pub fn create_command_encoder(&self, label: &str) -> wgpu::CommandEncoder {
+ self.device
+ .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some(label) })
+ }
+
+ pub(crate) fn begin_profiler_section<'a>(
+ &self,
+ label: &str,
+ encoder: &'a mut wgpu::CommandEncoder,
+ ) {
+ self.profiler.begin_section(label);
+ self.profiler.timestamp(label, encoder);
+ }
+
+ pub(crate) fn begin_pipeline_statistics_query(&self, render_pass: &mut wgpu::RenderPass) {
+ self.profiler.begin_stats(render_pass);
+ }
+
+ pub fn total_statistics(&self) -> Result<[u64; 5], GpuError> {
+ // Get the QuerySet from the profiler
+ let stats = self.profiler.stats.as_ref().ok_or(GpuError::QueryNone)?;
+
+ let mut ret = [0; 5];
+
+ for (i, stat) in stats
+ .get(&self.device, self.profiler.query_count())?
+ .iter()
+ .enumerate()
+ {
+ ret[i % 5] += stat;
+ }
+
+ Ok(ret)
+ }
+
+ pub fn timestamp_report(&self) -> Vec<(String, f32)> {
+ self.profiler.timestamp_report(&self.device)
+ }
+}
+
+impl Deref for GpuHandle {
+ type Target = Gpu;
+ fn deref(&self) -> &Self::Target {
+ &self.context
+ }
+}
diff --git a/src/graphics/gpu/builder.rs b/src/graphics/gpu/builder.rs
new file mode 100644
index 0000000..f73e1c9
--- /dev/null
+++ b/src/graphics/gpu/builder.rs
@@ -0,0 +1,187 @@
+use futures::executor::block_on;
+use raw_window_handle::{HasRawWindowHandle, RawWindowHandle};
+
+use crate::{
+ graphics::{Gpu, GpuError, GpuHandle},
+ Profiler,
+};
+
+#[derive(Clone)]
+/// Builder for `GpuContext`.
+/// By default this is initialized with sensible values for our use case.
+pub struct GpuBuilder<'a> {
+ /// The backends that wgpu should use.
+ /// By default, this is only the PRIMARY backends, which have first-class support.
+ /// You can alternatively specify individual backends such as `VULKAN` or `DX12`.
+ backends: wgpu::Backends,
+ /// The power preference for the adapter.
+ /// This defaults to `HighPerformance` but can be set to use `LowPower`.
+ power_preference: wgpu::PowerPreference,
+ /// The device limits.
+ limits: wgpu::Limits,
+ /// The features that the device must support.
+ features: wgpu::Features,
+ /// The features that the device can optionally support.
+ optional_features: wgpu::Features,
+ /// The optional output trace path for wgpu
+ trace_path: Option<&'a std::path::Path>,
+ /// The label for this context.
+ label: Option<&'a str>,
+ /// The texture format of the swapchain.
+ swapchain_format: wgpu::TextureFormat,
+}
+impl Default for GpuBuilder<'_> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+impl<'a> GpuBuilder<'a> {
+ /// Create a `GpuBuilder` with sensible defaults.
+ fn new() -> Self {
+ Self {
+ backends: wgpu::Backends::PRIMARY,
+ power_preference: wgpu::PowerPreference::HighPerformance,
+ limits: wgpu::Limits::default(),
+ label: None,
+ swapchain_format: crate::DEFAULT_SWAP_CHAIN_FORMAT,
+ features: wgpu::Features::default(),
+ optional_features: wgpu::Features::empty(),
+ trace_path: None,
+ }
+ }
+ /// Sets the backends that wgpu should use.
+ pub fn with_backends(mut self, backends: wgpu::Backends) -> Self {
+ self.backends = backends;
+ self
+ }
+
+ /// Sets the power preference for the adapter.
+ pub fn with_power_preference(mut self, power_preference: wgpu::PowerPreference) -> Self {
+ self.power_preference = power_preference;
+ self
+ }
+
+ /// Sets the limits for the device.
+ /// There is certainly a better way to do this.
+ pub fn with_limits(mut self, limits: wgpu::Limits) -> Self {
+ self.limits = limits;
+ self
+ }
+
+ /// Sets the label for the device.
+ /// Can be used with `&String` argument.
+ /// The argument must outlive the builder.
+ pub fn with_label(mut self, label: &'a str) -> Self {
+ self.label = Some(label);
+ self
+ }
+
+ /// Sets the format for the swapchain.
+ pub fn with_swapchain_format(mut self, format: wgpu::TextureFormat) -> Self {
+ self.swapchain_format = format;
+ self
+ }
+
+ /// Sets the features that the device must support.
+ pub fn with_features(mut self, features: wgpu::Features) -> Self {
+ self.features = features;
+ self
+ }
+
+ /// Sets the features that the device can optionally support.
+ pub fn with_optional_features(mut self, features: wgpu::Features) -> Self {
+ self.optional_features = features;
+ self
+ }
+
+ /// Enables the GPU profiler by setting the corresponding feature flags
+ pub fn with_profiler(mut self) -> Self {
+ self.features |= wgpu::Features::TIMESTAMP_QUERY;
+ self.features |= wgpu::Features::PIPELINE_STATISTICS_QUERY;
+ self
+ }
+
+ /// Sets the output trace path for wgpu
+ pub fn with_trace_path(mut self, path: &'a std::path::Path) -> Self {
+ self.trace_path = Some(path);
+ self
+ }
+
+ /// Blocking shorthand for `build_impl()` with a window.
+ /// # Errors
+ /// Errors when the inner `build_impl()` fails.
+ pub fn build<W>(self, window: &W) -> Result<GpuHandle, GpuError>
+ where
+ W: HasRawWindowHandle,
+ {
+ block_on(self.build_impl(Some(window)))
+ }
+
+ pub async fn build_headless(self) -> Result<GpuHandle, GpuError> {
+ self.build_impl::<NoWindow>(None).await
+ }
+
+ /// Build the `Gpu` from the builder and wrap it in a `GpuHandle`.
+ /// Use `build()` for a blocking version.
+ /// # Errors
+ /// Errors when a connection to the GPU could not be established.
+ pub async fn build_impl<W>(self, window: Option<&W>) -> Result<GpuHandle, GpuError>
+ where
+ W: HasRawWindowHandle,
+ {
+ // Create the wgpu instance.
+ let instance = wgpu::Instance::new(self.backends);
+
+ // Create a surface to test compatibility, if there is a window.
+ // Note that it is illegal to create a swapchain to the same surface twice,
+ // however since we are only creating the surface and not the swapchain,
+ // this is not an issue.
+ let compatible_surface = window.map(|w| unsafe { instance.create_surface(w) });
+
+ // Initialize the adapter (physical device).
+ let adapter = instance
+ .request_adapter(&wgpu::RequestAdapterOptions {
+ power_preference: self.power_preference,
+ compatible_surface: compatible_surface.as_ref(),
+ force_fallback_adapter: false,
+ })
+ .await
+ .ok_or(GpuError::AdapterNone)?;
+
+ // Create the `device` (and get the handle for the command queue `queue`)
+ let (device, queue) = adapter
+ .request_device(
+ &wgpu::DeviceDescriptor {
+ limits: self.limits.clone(),
+ label: self.label,
+ features: self.features(&adapter),
+ },
+ self.trace_path,
+ )
+ .await
+ .map_err(GpuError::RequestDeviceError)?;
+
+ let profiler = Profiler::new(&device, &queue);
+
+ let gpu = Gpu {
+ instance,
+ adapter,
+ device,
+ queue,
+ profiler,
+ };
+
+ Ok(gpu.to_handle())
+ }
+
+ fn features(&self, adapter: &wgpu::Adapter) -> wgpu::Features {
+ self.features | (self.optional_features & adapter.features())
+ }
+}
+
+struct NoWindow;
+unsafe impl HasRawWindowHandle for NoWindow {
+ fn raw_window_handle(&self) -> RawWindowHandle {
+ unsafe { std::mem::zeroed() }
+ }
+}
diff --git a/src/graphics/pipeline.rs b/src/graphics/pipeline.rs
new file mode 100644
index 0000000..70dfa43
--- /dev/null
+++ b/src/graphics/pipeline.rs
@@ -0,0 +1,23 @@
+use std::ops::Deref;
+
+use crate::GpuHandle;
+
+mod builder;
+pub use builder::PipelineBuilder;
+
+pub struct Pipeline {
+ pub gpu: GpuHandle,
+ inner: wgpu::RenderPipeline,
+}
+
+impl Deref for Pipeline {
+ type Target = wgpu::RenderPipeline;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+pub trait RenderPipeline {
+ fn render();
+}
diff --git a/src/graphics/pipeline/builder.rs b/src/graphics/pipeline/builder.rs
new file mode 100644
index 0000000..32641d9
--- /dev/null
+++ b/src/graphics/pipeline/builder.rs
@@ -0,0 +1,289 @@
+use std::borrow::Cow;
+
+use wgpu::ShaderModuleDescriptor;
+use wgpu::ShaderSource;
+
+use crate::GpuError;
+use crate::GpuHandle;
+
+use crate::Pipeline;
+
+pub struct PipelineBuilder<'a> {
+ /// Handle to the Gpu
+ gpu: GpuHandle,
+ /// Data that is used to build the pipeline
+ /// This is a separate struct to take advantage of the `Default` trait derivation
+ desc: PipelineDescriptor<'a>,
+
+ /// SPIR-V bytes for the vertex shader
+ vertex: ShaderModuleDescriptor<'a>,
+ /// SPIR-V bytes for the fragment shader.
+ /// This is optional
+ fragment: Option<ShaderModuleDescriptor<'a>>,
+ vertex_entry: &'a str,
+ fragment_entry: &'a str,
+ fragment_targets: &'a [wgpu::ColorTargetState],
+}
+
+#[derive(Default)]
+struct PipelineDescriptor<'a> {
+ label: Option<&'a str>,
+ // PIPELINE LAYOUT
+ /// Bind groups that this pipeline uses. The first entry will provide all the bindings for
+ /// "set = 0", second entry will provide all the bindings for "set = 1" etc.
+ bind_group_layouts: &'a [&'a wgpu::BindGroupLayout],
+ /// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants
+ /// must define the range in push constant memory that corresponds to its single `layout(push_constant)`
+ /// uniform block.
+ /// Requires [`Features::PUSH_CONSTANTS`].
+ push_constant_ranges: &'a [wgpu::PushConstantRange],
+ // RENDER PIPELINE
+ /// Primitive type the input mesh is composed of. Has Default.
+ primitive: wgpu::PrimitiveState,
+ /// Describes the depth/stencil state in a render pipeline. Optional.
+ depth_stencil: Option,
+ multisample: wgpu::MultisampleState,
+ vertex_layouts: &'a [wgpu::VertexBufferLayout<'a>],
+}
+impl PipelineBuilder<'_> {
+ pub fn make_spirv(bytes: &[u8]) -> Result<ShaderSource<'_>, GpuError> {
+ // HACK: This is a workaround for wgpu's spirv parsing. It will panic if the bytes
+ // are not valid SPIR-V instead of returning a Result.
+ // But even using catch_unwind the panic will be logged in stdout. So we're
+ // registering a custom panic hook to suppress the output for this function.
+
+ // This is *potentially* dangerous since make_spirv() could panic for other reasons.
+ // TODO: Check the data length and magic number here before calling make_spirv().
+ // That will allow us to remove the panic code.
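+ // (For reference: a valid SPIR-V module begins with the magic number
+ // 0x0723_0203 and its byte length is a multiple of 4.)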
+
+ // First we save the current hook
+ let prev_hook = std::panic::take_hook();
+ // Now we register our own hook which does nothing
+ std::panic::set_hook(Box::new(|_| {}));
+ // Now we try to parse the bytes, and if it panics, we return an error instead of panicking
+ let result = std::panic::catch_unwind(|| wgpu::util::make_spirv(bytes))
+ .map_err(|_| GpuError::ShaderParseError);
+ // Now we restore the previous hook
+ std::panic::set_hook(prev_hook);
+ // Return the result
+ result
+ }
+
+ // FIXME: This is so scuffed
+ pub fn make_spirv_owned<'f>(mut vec8: Vec<u8>) -> Result<ShaderSource<'f>, GpuError> {
+ // I copy-pasted this code from StackOverflow without reading the answer
+ // surrounding it that told me to write a comment explaining why this code
+ // is actually safe for my own use case.
+ let vec32 = unsafe {
+ let ratio = std::mem::size_of::<u32>() / std::mem::size_of::<u8>();
+
+ let length = vec8.len() / ratio;
+ let capacity = vec8.capacity() / ratio;
+ let ptr = vec8.as_mut_ptr() as *mut u32;
+
+ // Don't run the destructor for vec8; its allocation is now owned by vec32
+ std::mem::forget(vec8);
+
+ // Construct new Vec
+ Vec::from_raw_parts(ptr, length, capacity)
+ };
+ Ok(ShaderSource::SpirV(Cow::Owned(vec32)))
+ }
+
+ pub fn make_wgsl(wgsl: &str) -> Result<ShaderSource<'_>, GpuError> {
+ Ok(ShaderSource::Wgsl(Cow::Borrowed(wgsl)))
+ }
+
+ pub fn make_wgsl_owned<'f>(wgsl: String) -> Result<ShaderSource<'f>, GpuError> {
+ Ok(ShaderSource::Wgsl(Cow::Owned(wgsl)))
+ }
+
+ pub fn shader_auto_load(path: &str) -> Result<ShaderSource<'static>, GpuError> {
+ if let Ok(spirv) = Self::make_spirv_owned(std::fs::read(path).unwrap()) {
+ Ok(spirv)
+ } else if let Ok(wgsl) = Self::make_wgsl_owned(std::fs::read_to_string(path).unwrap()) {
+ Ok(wgsl)
+ } else {
+ Err(GpuError::ShaderParseError)
+ }
+ }
+
+ pub fn shader_auto(bytes: &[u8]) -> Result<ShaderSource<'_>, GpuError> {
+ if let Ok(spirv) = Self::make_spirv(bytes) {
+ Ok(spirv)
+ } else if let Ok(wgsl) = Self::make_wgsl(Self::str_from_bytes(bytes)?) {
+ Ok(wgsl)
+ } else {
+ Err(GpuError::ShaderParseError)
+ }
+ }
+}
+impl<'a> PipelineBuilder<'a> {
+ pub fn new(gpu: GpuHandle) -> Self {
+ let vertex = wgpu::util::make_spirv(include_bytes!("../../shader/screen.vert.spv"));
+ let fragment = wgpu::util::make_spirv(include_bytes!("../../shader/uv.frag.spv"));
+
+ let vertex = ShaderModuleDescriptor {
+ label: Some("Default vertex shader"),
+ source: vertex,
+ };
+ let fragment = Some(ShaderModuleDescriptor {
+ label: Some("Default fragment shader"),
+ source: fragment,
+ });
+
+ Self {
+ gpu,
+ desc: PipelineDescriptor::default(),
+ vertex,
+ fragment,
+ vertex_entry: "main",
+ fragment_entry: "main",
+ fragment_targets: &[wgpu::ColorTargetState {
+ format: crate::DEFAULT_SWAP_CHAIN_FORMAT,
+ blend: Some(wgpu::BlendState::ALPHA_BLENDING),
+ write_mask: wgpu::ColorWrites::ALL,
+ }],
+ }
+ }
+ /// Set the vertex buffer layouts
+ pub fn with_vertex_layouts(mut self, layouts: &'a [wgpu::VertexBufferLayout<'a>]) -> Self {
+ self.desc.vertex_layouts = layouts;
+ self
+ }
+
+ fn str_from_bytes(bytes: &[u8]) -> Result<&str, GpuError> {
+ std::str::from_utf8(bytes).map_err(|_| GpuError::ShaderParseError)
+ }
+
+ /// Load the vertex shader from file path.
+ /// See `with_vertex()` for loading static bytes.
+ pub fn load_vertex(mut self, path: &'a str) -> Self {
+ self.vertex.source = Self::shader_auto_load(path).expect("Load vertex shader");
+ self
+ }
+ /// Load the vertex shader from bytes.
+ /// This is convenient for static bytes. If you want to load from a file at
+ /// runtime, see `load_vertex()`.
+ pub fn with_vertex(mut self, bytes: &'a [u8]) -> Self {
+ self.vertex.source = Self::shader_auto(bytes).expect("Parse vertex shader");
+ self
+ }
+
+ /// Load the fragment shader from bytes.
+ /// This is convenient for static bytes. If you want to load from a file at
+ /// runtime, see `load_fragment()`.
+ pub fn with_fragment(mut self, bytes: &'static [u8]) -> Self {
+ self.fragment = Some(ShaderModuleDescriptor {
+ label: Some("Default fragment shader"),
+ source: Self::shader_auto(bytes).expect("Parse fragment shader"),
+ });
+ self
+ }
+
+ /// Convenience method for with_vertex() + with_fragment()
+ /// This also sets the entry points to vs_main and fs_main respectively.
+ pub fn with_vertex_fragment(mut self, bytes: &'static [u8]) -> Self {
+ self.vertex_entry = "vs_main";
+ self.fragment_entry = "fs_main";
+ self.with_vertex(bytes).with_fragment(bytes)
+ }
+
+ /// Optional version of with_fragment(), for use in macros.
+ /// This has no effect if None is provided. To remove the fragment shader,
+ /// use no_fragment() instead.
+ pub fn with_fragment_opt(self, fragment_bytes: Option<&'static [u8]>) -> Self {
+ if let Some(bytes) = fragment_bytes {
+ self.with_fragment(bytes)
+ } else {
+ self
+ }
+ }
+
+ /// Load the fragment shader from file path at runtime.
+ /// See `with_fragment()` for loading static bytes.
+ pub fn load_fragment(mut self, fragment: &'a str) -> Self {
+ self.fragment = Some(ShaderModuleDescriptor {
+ label: Some("Default fragment shader"),
+ source: Self::shader_auto_load(fragment).expect("Load fragment shader"),
+ });
+ self
+ }
+
+ pub fn with_bind_groups(mut self, bind_groups: &'a [&wgpu::BindGroupLayout]) -> Self {
+ self.desc.bind_group_layouts = bind_groups;
+ self
+ }
+
+ /// Cull front faces.
+ /// Front is CCW.
+ pub fn cull_front(mut self) -> Self {
+ self.desc.primitive.cull_mode = Some(wgpu::Face::Front);
+ self
+ }
+
+ /// Cull back faces.
+ /// Back is CW.
+ pub fn cull_back(mut self) -> Self {
+ self.desc.primitive.cull_mode = Some(wgpu::Face::Back);
+ self
+ }
+
+ #[must_use]
+ pub fn build(&self) -> Pipeline {
+ // Create vertex module
+ let vertex_module = self.gpu.device.create_shader_module(&self.vertex);
+
+ // Create shader module
+ let fragment_module = self
+ .fragment
+ .as_ref()
+ .map(|fragment| self.gpu.device.create_shader_module(fragment));
+
+ // Map fragment state if Some() otherwise it is None
+ let fragment = fragment_module
+ .as_ref()
+ .map(|fs_module| wgpu::FragmentState {
+ module: fs_module,
+ entry_point: self.fragment_entry,
+ targets: self.fragment_targets,
+ });
+
+ // The pipeline layout
+ let layout = self
+ .gpu
+ .device
+ .create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+ label: self.label_suffix("pipeline layout").as_deref(),
+ bind_group_layouts: self.desc.bind_group_layouts,
+ push_constant_ranges: self.desc.push_constant_ranges,
+ });
+
+ // Create the pipeline
+ let pipeline = self
+ .gpu
+ .device
+ .create_render_pipeline(&wgpu::RenderPipelineDescriptor {
+ layout: Some(&layout),
+ label: self.desc.label,
+ vertex: wgpu::VertexState {
+ module: &vertex_module,
+ entry_point: self.vertex_entry,
+ buffers: self.desc.vertex_layouts,
+ },
+ primitive: self.desc.primitive,
+ depth_stencil: self.desc.depth_stencil.clone(),
+ multisample: self.desc.multisample,
+ fragment,
+ });
+ Pipeline {
+ gpu: self.gpu.clone(),
+ inner: pipeline,
+ }
+ }
+
+ /// Helper function to append a suffix to the label, if Some
+ fn label_suffix(&self, suffix: &str) -> Option<String> {
+ self.desc.label.map(|label| format!("{} {}", label, suffix))
+ }
+}
diff --git a/src/graphics/profiler.rs b/src/graphics/profiler.rs
new file mode 100644
index 0000000..e06097e
--- /dev/null
+++ b/src/graphics/profiler.rs
@@ -0,0 +1,148 @@
+use std::{
+ cell::{Cell, RefCell},
+ mem::size_of,
+};
+
+mod marker;
+
+mod section;
+
+mod queryset;
+pub use queryset::QuerySet;
+
+pub const MAX_QUERIES: u32 = wgpu::QUERY_SET_MAX_QUERIES;
+pub const MAX_BUFFER_SIZE: u64 = MAX_QUERIES as u64 * size_of::<u64>() as u64;
+
+pub const PIPELINE_STATISTICS_LABELS: [&str; 5] = [
+ "Vertex shader invocations",
+ "Clipper invokations",
+ "Clipper primitives out",
+ "Fragment shader invocations",
+ "Compute shader invocations",
+];
+
+pub struct Profiler {
+ /// Timestamp query set. Each query records a 64-bit GPU timestamp at which
+ /// all previous commands have finished executing.
+ pub(crate) timestamp: Option<QuerySet>,
+ pub(crate) stats: Option<QuerySet>,
+ /// The amount of nanoseconds each tick of a timestamp query represents
+ pub timestamp_period: f32,
+ markers: RefCell<Vec<String>>,
+ resolved: Cell<bool>,
+}
+
+impl Profiler {
+ #[allow(clippy::cast_possible_truncation)]
+ pub fn query_count(&self) -> u32 {
+ self.markers.borrow().len() as u32
+ }
+
+ #[allow(clippy::cast_possible_truncation)]
+ pub fn query_index(&self) -> u32 {
+ self.query_count() - 1
+ }
+
+ #[must_use]
+ pub fn new(device: &wgpu::Device, queue: &wgpu::Queue) -> Self {
+ // Timestamp period is multiplied by the time span to get the duration in nanoseconds
+ let timestamp_period = queue.get_timestamp_period();
+
+ // QuerySet availability is based on device's feature support.
+ // If you want to opt in or out of a query set then do so in the feature set.
+ let timestamp = device
+ .features()
+ .contains(wgpu::Features::TIMESTAMP_QUERY)
+ .then(|| QuerySet::new_timestamp(device, MAX_QUERIES));
+
+ let stats = device
+ .features()
+ .contains(wgpu::Features::PIPELINE_STATISTICS_QUERY)
+ .then(|| QuerySet::new_stats(device, MAX_QUERIES));
+
+ Self {
+ timestamp,
+ stats,
+ timestamp_period,
+ markers: RefCell::new(Vec::new()),
+ resolved: Cell::new(false),
+ }
+ }
+
+ pub(crate) fn begin_section(&self, label: &str) {
+ self.markers.borrow_mut().push(label.to_string());
+ }
+
+ pub fn timestamp(&self, _label: &str, encoder: &mut wgpu::CommandEncoder) {
+ if let Some(ts_qs) = &self.timestamp {
+ encoder.write_timestamp(ts_qs, self.query_index());
+ }
+ }
+
+ pub fn begin_stats(&self, render_pass: &mut wgpu::RenderPass) {
+ if let Some(stats_qs) = &self.stats {
+ render_pass.begin_pipeline_statistics_query(stats_qs, self.query_index());
+ }
+ }
+
+ pub fn end_stats(&self, render_pass: &mut wgpu::RenderPass) {
+ if self.stats.is_some() {
+ render_pass.end_pipeline_statistics_query();
+ }
+ }
+
+ /// Must be called before get()
+ pub fn resolve(&self, encoder: &mut wgpu::CommandEncoder) {
+ self.stats
+ .as_ref()
+ .unwrap()
+ .resolve(self.query_count(), encoder);
+ // if !self.resolved.replace(true) {
+ // // If replace() returns false then the query set still needs to be resolved
+ // self.foreach_query_set(|query_set| query_set.resolve(self.query_count(), encoder));
+ // }
+ }
+
+ pub fn timestamp_report(&self, device: &wgpu::Device) -> Vec<(String, f32)> {
+ let mut ret = vec![];
+ if let Some(timestamp) = &self.timestamp {
+ if let Ok(val) = timestamp.get(device, self.query_count()) {
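+ // Each marker after the first is reported with the time elapsed since the
+ // previous timestamp; markers and timestamps are recorded in the same order.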
+ for (i, marker) in self.markers.borrow()[1..].iter().enumerate() {
+ let start = val[i];
+ let end = val[i + 1];
+ let duration_ms = self.ts_to_millis(end - start);
+ // println!("{} took {} ms", marker, duration_ms);
+ ret.push((marker.clone(), duration_ms));
+ }
+ }
+ };
+
+ // if let Some(stats) = &self.stats {
+ // if let Ok(val) = stats.get(device, self.query_count()) {
+ // println!("Stats: {:#?}", val);
+ // }
+ // };
+
+ ret
+ }
+
+ #[deprecated]
+ #[allow(dead_code)]
+ fn foreach_query_set<F, T>(&self, mut f: F) -> Vec<T>
+ where
+ F: FnMut(&QuerySet) -> T,
+ {
+ let sets = &[self.timestamp.as_ref(), self.stats.as_ref()];
+ let iter = sets.iter().flatten();
+ iter.map(|q| f(*q)).collect::<Vec<T>>()
+ }
+
+ pub fn clear(&self) {
+ self.markers.borrow_mut().clear();
+ self.resolved.set(false);
+ }
+
+ #[allow(clippy::cast_precision_loss)]
+ fn ts_to_millis(&self, ts: u64) -> f32 {
+ ts as f32 * self.timestamp_period / 1_000_000.0
+ }
+}
diff --git a/src/graphics/profiler/marker.rs b/src/graphics/profiler/marker.rs
new file mode 100644
index 0000000..d84ec73
--- /dev/null
+++ b/src/graphics/profiler/marker.rs
@@ -0,0 +1,5 @@
+#[derive(Debug)]
+pub struct TimestampMarker<'a> {
+ pub label: &'a str,
+ pub duration: f32,
+}
diff --git a/src/graphics/profiler/queryset.rs b/src/graphics/profiler/queryset.rs
new file mode 100644
index 0000000..d98a4b2
--- /dev/null
+++ b/src/graphics/profiler/queryset.rs
@@ -0,0 +1,124 @@
+use std::{mem::size_of, ops::Deref};
+
+use futures::executor::block_on;
+
+use crate::QUERYSET_BUFFER_USAGE;
+use crate::{GpuError, ScopedBufferView};
+
+pub struct QuerySet {
+ pub(crate) inner: wgpu::QuerySet,
+ pub(crate) buffer: wgpu::Buffer,
+ pub(crate) ty: wgpu::QueryType,
+ // /// Currently unused.
+ // pub(crate) query_count: u32,
+}
+/// Allows deref of a `QuerySet` to the inner `wgpu::QuerySet`.
+impl Deref for QuerySet {
+ type Target = wgpu::QuerySet;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl QuerySet {
+ /// Creates a new `QuerySet` that times various events.
+ pub(crate) fn new_timestamp(device: &wgpu::Device, count: u32) -> Self {
+ let ty = wgpu::QueryType::Timestamp;
+
+ let label = Some("Timestamp QuerySet");
+ Self::new_impl(device, ty, label, count)
+ }
+
+ /// Creates a new `QuerySet` that counts various pipeline events.
+ /// See `QueryType::PipelineStatistics`
+ ///
+ /// ! For now, this always uses all pipeline stat types.
+ pub(crate) fn new_stats(device: &wgpu::Device, count: u32) -> Self {
+ // Use all pipeline stat types
+ let all = wgpu::PipelineStatisticsTypes::all();
+ let ty = wgpu::QueryType::PipelineStatistics(all);
+
+ let label = Some("PipelineStatistics QuerySet");
+ Self::new_impl(device, ty, label, count)
+ }
+
+ /// Makes the wgpu calls to create the query set.
+ fn new_impl(
+ device: &wgpu::Device,
+ ty: wgpu::QueryType,
+ label: Option<&str>,
+ count: u32,
+ ) -> Self {
+ // Size of a single query result in u64s.
+ let query_size = query_ty_size(ty);
+ let buffer_size = query_size * count * size_of::<u64>() as u32;
+
+ // Create the query set
+ let inner = device.create_query_set(&wgpu::QuerySetDescriptor { ty, count, label });
+
+ let buffer = device.create_buffer(&wgpu::BufferDescriptor {
+ label,
+ size: buffer_size as u64,
+ usage: QUERYSET_BUFFER_USAGE,
+ mapped_at_creation: false,
+ });
+ QuerySet {
+ ty,
+ inner,
+ buffer,
+ // query_count: 0,
+ }
+ }
+
+ pub fn query_size(&self) -> u32 {
+ query_ty_size(self.ty)
+ }
+
+ pub fn resolve(&self, count: u32, encoder: &mut wgpu::CommandEncoder) {
+ encoder.resolve_query_set(self, 0..count, &self.buffer, 0)
+ }
+
+ /// Must first call resolve()
+ pub fn get(&self, device: &wgpu::Device, count: u32) -> Result<Vec<u64>, GpuError> {
+ // Check to see if there aren't any markers
+ if count == 0 {
+ // This is not an error
+ // We return an empty array to avoid mapping the buffer which we already know is empty
+ return Ok(Vec::new());
+ }
+
+ // Map the buffer for reading
+ // ? Maybe we can save a local copy of the last data we read so we can avoid mapping the buffer multiple times
+ let slice = self
+ .buffer
+ .slice(..size_of::<u64>() as u64 * (count * self.query_size()) as u64);
+ let mapping = slice.map_async(wgpu::MapMode::Read);
+ device.poll(wgpu::Maintain::Wait);
+ block_on(mapping).map_err(|_| GpuError::BufferAsyncError)?;
+ let view = slice.get_mapped_range();
+
+ let view = ScopedBufferView::new(&self.buffer, view);
+ let timestamps: &[u64] = bytemuck::cast_slice(&view);
+
+ // dbg!(&timestamps);
+
+ Ok(timestamps.to_vec())
+ }
+}
+
+/// The size (in u64s) of a single query result given the query type.
+fn query_ty_size(ty: wgpu::QueryType) -> u32 {
+ // Size of a single query.
+ match ty {
+ wgpu::QueryType::PipelineStatistics(ty) => num_bits_set(ty.bits()),
+ _ => 1,
+ }
+}
+
+/// Counts the number of bits set for the input n. Used for bitflags.
+fn num_bits_set<N>(n: N) -> u32
+where
+ N: num_traits::PrimInt,
+{
+ n.count_ones()
+}
diff --git a/src/graphics/profiler/section.rs b/src/graphics/profiler/section.rs
new file mode 100644
index 0000000..3a8461f
--- /dev/null
+++ b/src/graphics/profiler/section.rs
@@ -0,0 +1,21 @@
+#![allow(dead_code)]
+//! Dead code is allowed because this module is currently unused.
+use crate::Profiler;
+
+/// A convenience struct for creating a profiler section.
+pub struct Section<'a> {
+ pub(crate) profiler: &'a Profiler,
+ pub(crate) label: String,
+}
+impl<'a> Section<'a> {
+ pub fn new(profiler: &'a Profiler, label: &'a str) -> Section<'a> {
+ let label = label.to_string();
+ Section { profiler, label }
+ }
+}
+
+impl Drop for Section<'_> {
+ fn drop(&mut self) {
+ // Unused
+ }
+}
diff --git a/src/graphics/texture.rs b/src/graphics/texture.rs
new file mode 100644
index 0000000..6603ea6
--- /dev/null
+++ b/src/graphics/texture.rs
@@ -0,0 +1,2 @@
+mod view;
+pub use view::*;
diff --git a/src/graphics/texture/view.rs b/src/graphics/texture/view.rs
new file mode 100644
index 0000000..7efdc3d
--- /dev/null
+++ b/src/graphics/texture/view.rs
@@ -0,0 +1,3 @@
+pub struct TextureView {
+ pub inner: wgpu::TextureView,
+}
diff --git a/src/graphics/viewport.rs b/src/graphics/viewport.rs
new file mode 100644
index 0000000..1e91b2b
--- /dev/null
+++ b/src/graphics/viewport.rs
@@ -0,0 +1,274 @@
+mod builder;
+pub use builder::*;
+
+mod frame;
+pub use frame::*;
+
+mod render_pass;
+pub use render_pass::*;
+
+use std::{cell::RefCell, ops::Deref};
+
+use crate::{GpuError, GpuHandle};
+
+pub trait RenderTarget {
+ fn begin_frame(&self) -> Result<Frame, GpuError>;
+}
+
+/// A `Viewport` is a rectangular area of the screen that can be presented to.
+// * Using RefCell for interior mutability is somewhat suboptimal since it does
+// * have a runtime cost, but since we will not have many viewports or calls
+// * to those RefCells, it should be fine.
+// ? But should we have RefCell anyway? Maybe we should just use external mutability?
+pub struct Viewport {
+ pub gpu: GpuHandle,
+ pub surface: wgpu::Surface,
+ pub window: winit::window::Window,
+ /// The swap chain descriptor contains the size and format of the swap chain texture
+ /// Uses RefCell for interior mutability.
+ pub sc_desc: RefCell<wgpu::SurfaceConfiguration>,
+ /// Uses RefCell for interior mutability.
+ // pub swap_chain: RefCell,
+ pub depth_texture: RefCell<wgpu::Texture>,
+ pub depth_view: RefCell<wgpu::TextureView>,
+ /// Data buffer for viewport properties.
+ /// Binding 0: viewport size f32x2
+ pub data_buffer: wgpu::Buffer,
+ /// A queued resize. Stored when resize() is called and applied before the next
+ /// swapchain frame is given.
+ /// Uses RefCell for interior mutability.
+ pub resize_to: RefCell