diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..3550a30 --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use flake diff --git a/Cargo.lock b/Cargo.lock old mode 100755 new mode 100644 index fa92075..0ef9f09 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "ab_glyph" -version = "0.2.23" +version = "0.2.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80179d7dd5d7e8c285d67c4a1e652972a92de7475beddfb92028c76463b13225" +checksum = "79faae4620f45232f599d9bc7b290f88247a0834162c4495ab2f02d60004adfb" dependencies = [ "ab_glyph_rasterizer", "owned_ttf_parser", @@ -20,75 +20,65 @@ checksum = "c71b1793ee61086797f5c80b6efa2b8ffa6d5dd703f118545808a7f2e27f7046" [[package]] name = "accesskit" -version = "0.12.2" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cb10ed32c63247e4e39a8f42e8e30fb9442fbf7878c8e4a9849e7e381619bea" +checksum = "6cf780eb737f2d4a49ffbd512324d53ad089070f813f7be7f99dbd5123a7f448" [[package]] name = "accesskit_consumer" -version = "0.16.1" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c17cca53c09fbd7288667b22a201274b9becaa27f0b91bf52a526db95de45e6" +checksum = "3bdfa1638ddd6eb9c752def95568df8b3ad832df252e9156d2eb783b201ca8a9" dependencies = [ "accesskit", + "immutable-chunkmap", ] [[package]] name = "accesskit_macos" -version = "0.10.1" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3b6ae1eabbfbced10e840fd3fce8a93ae84f174b3e4ba892ab7bcb42e477a7" +checksum = "c236a84ff1111defc280cee755eaa953d0b24398786851b9d28322c6d3bb1ebd" dependencies = [ "accesskit", "accesskit_consumer", - "objc2 0.3.0-beta.3.patch-leaks.3", + "objc2", + "objc2-app-kit", + "objc2-foundation", "once_cell", ] [[package]] name = "accesskit_windows" -version = "0.15.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcae27ec0974fc7c3b0b318783be89fd1b2e66dd702179fe600166a38ff4a0b" +checksum = "5d7f43d24b16b3e76bef248124fbfd2493c3a9860edb5aae1010c890e826de5e" dependencies = [ "accesskit", "accesskit_consumer", - "once_cell", "paste", "static_assertions", - "windows 0.48.0", + "windows 0.54.0", ] [[package]] name = "accesskit_winit" -version = "0.15.0" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88e39fcec2e10971e188730b7a76bab60647dacc973d4591855ebebcadfaa738" +checksum = "755535e6bf711a42dac28b888b884b10fc00ff4010d9d3bd871c5f5beae5aa78" dependencies = [ "accesskit", "accesskit_macos", "accesskit_windows", - "winit 0.28.7", -] - -[[package]] -name = "accesskit_winit" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45f8f7c9f66d454d5fd8e344c8c8c7324b57194e1041b955519fc58a01e77a25" -dependencies = [ - "accesskit", - "accesskit_macos", - "accesskit_windows", - "raw-window-handle 0.6.0", - "winit 0.29.10", + "raw-window-handle", + "winit", ] [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -101,9 +91,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.9" +version = "0.8.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom", @@ -114,29 +104,34 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" + [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alsa" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2562ad8dcf0f789f65c6fdaad8a8a9708ed6b488e649da28c01656ad66b8b47" +checksum = "37fe60779335388a88c01ac6c3be40304d1e349de3ada3b15f7808bb90fa9dce" dependencies = [ "alsa-sys", - "bitflags 1.3.2", + "bitflags 2.6.0", "libc", - "nix 0.24.3", ] [[package]] @@ -151,40 +146,22 @@ dependencies = [ [[package]] name = "android-activity" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64529721f27c2314ced0890ce45e469574a73e5e6fdd6e9da1860eb29285f5e0" -dependencies = [ - "android-properties", - "bitflags 1.3.2", - "cc", - "jni-sys", - "libc", - "log", - "ndk 0.7.0", - "ndk-context", - "ndk-sys 0.4.1+23.1.7779620", - "num_enum 0.6.1", -] - -[[package]] -name = "android-activity" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee91c0c2905bae44f84bfa4e044536541df26b7703fd0888deeb9060fcc44289" +checksum = "ef6978589202a00cd7e118380c448a08b6ed394c3a8df3a430d0898e3a42d046" dependencies = [ "android-properties", - "bitflags 2.4.2", + "bitflags 2.6.0", "cc", "cesu8", - "jni 0.21.1", + "jni", "jni-sys", "libc", "log", - "ndk 0.8.0", + "ndk 0.9.0", "ndk-context", - "ndk-sys 0.5.0+25.2.9519653", - "num_enum 0.7.2", + "ndk-sys 0.6.0+11769913", + "num_enum", "thiserror", ] @@ -217,9 +194,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "approx" @@ -230,25 +207,41 @@ dependencies = [ "num-traits", ] +[[package]] +name = "arbitrary" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" + [[package]] name = "arboard" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1faa3c733d9a3dd6fbaf85da5d162a2e03b2e0033a90dceb0e2a90fdd1e5380a" +checksum = "9fb4009533e8ff8f1450a5bcbc30f4242a1d34442221f72314bea1f5dc9c7f89" dependencies = [ - "clipboard-win 5.2.0", - "core-graphics 0.23.1", + "clipboard-win 5.3.1", + "core-graphics", 
"image", "log", - "objc", - "objc-foundation", - "objc_id", + "objc2", + "objc2-app-kit", + "objc2-foundation", "parking_lot", - "thiserror", "windows-sys 0.48.0", "x11rb", ] +[[package]] +name = "arg_enum_proc_macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "arrayref" version = "0.3.7" @@ -288,90 +281,56 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" -dependencies = [ - "concurrent-queue", - "event-listener 2.5.3", - "futures-core", -] - -[[package]] -name = "async-channel" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 5.1.0", - "event-listener-strategy 0.5.0", + "event-listener-strategy", "futures-core", "pin-project-lite", ] [[package]] name = "async-executor" -version = "1.8.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c" +checksum = "c8828ec6e544c02b0d6691d21ed9f9218d0384a82542855073c2a3f58304aaf0" dependencies = [ - "async-lock 3.3.0", "async-task", "concurrent-queue", - "fastrand 2.0.1", - "futures-lite 2.2.0", + "fastrand", + "futures-lite", "slab", ] [[package]] name = "async-fs" -version = "1.6.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" dependencies = [ - "async-lock 2.8.0", - "autocfg", + "async-lock", "blocking", - "futures-lite 1.13.0", -] - -[[package]] -name = "async-fs" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc19683171f287921f2405677dd2ed2549c3b3bda697a563ebc3a121ace2aba1" -dependencies = [ - "async-lock 3.3.0", - "blocking", - "futures-lite 2.2.0", + "futures-lite", ] [[package]] name = "async-lock" -version = "2.8.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 2.5.3", -] - -[[package]] -name = "async-lock" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" -dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy 0.4.0", + "event-listener 5.3.1", + "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-task" -version = "4.7.0" +version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "atomic-waker" @@ -381,15 +340,38 @@ checksum = 
"1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "av1-grain" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf" +dependencies = [ + "anyhow", + "arrayvec", + "log", + "nom", + "num-rational", + "v_frame", +] + +[[package]] +name = "avif-serialize" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "876c75a42f6364451a033496a14c44bffe41f5f4a8236f697391f11024e596d2" +dependencies = [ + "arrayvec", +] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -407,198 +389,109 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] -name = "bevy" -version = "0.12.1" +name = "base64" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bc7e09282a82a48d70ade0c4c1154b0fd7882a735a39c66766a5d0f4718ea9" -dependencies = [ - "bevy_internal 0.12.1", -] +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bevy" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611dd99f412e862610adb43e2243b16436c6d8009f6d9dbe8ce3d6d840b34029" +checksum = "8e938630e9f472b1899c78ef84aa907081b23bad8333140e2295c620485b6ee7" dependencies = [ "bevy_dylib", - "bevy_internal 0.13.0", -] - -[[package]] -name = "bevy-inspector-egui" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65b98d6fca1209c36c4d403c377f303aad22d940281fe1a9e431217516f0622" -dependencies = [ - "bevy-inspector-egui-derive", - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core 0.12.1", - "bevy_core_pipeline 0.12.1", - "bevy_ecs 0.12.1", - "bevy_egui 0.23.0", - "bevy_hierarchy 0.12.1", - "bevy_log 0.12.1", - "bevy_math 0.12.1", - "bevy_pbr 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", - "egui 0.23.0", - "image", - "once_cell", - "pretty-type-name", - "smallvec", -] - -[[package]] -name = "bevy-inspector-egui-derive" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec800b7cf98151b5dbff80f0eb6dffcb4bcfceef6e457888b395ead4eb7e75ba" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.50", + "bevy_internal", ] [[package]] name = "bevy_a11y" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68080288c932634f6563d3a8299efe0ddc9ea6787539c4c771ba250d089a94f0" -dependencies = [ - "accesskit", - "bevy_app 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", -] - -[[package]] -name = "bevy_a11y" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5bf80cd6d0dca4073f9b34b16f1d187a4caa035fd841892519431783bbc9e287" +checksum = "3e613f0e7d5a92637e59744f7185e374c9a59654ecc6d7575adcec9581db1363" dependencies = [ "accesskit", - "bevy_app 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", + "bevy_app", + "bevy_derive", + "bevy_ecs", ] [[package]] name = "bevy_animation" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4ef4c35533df3f0c4e938cf6a831456ea563775bab799336f74331140c7665" -dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core 0.13.0", - "bevy_ecs 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_time 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", -] - -[[package]] -name = "bevy_app" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41731817993f92e4363dd3335558e779e290bc71eefc0b5547052b85810907e" -dependencies = [ - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_reflect 0.12.1", - "bevy_tasks 0.12.1", - "bevy_utils 0.12.1", - "downcast-rs", - "wasm-bindgen", - "web-sys", +checksum = "23aa4141df149b743e69c90244261c6372bafb70d9f115885de48a75fc28fd9b" +dependencies = [ + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core", + "bevy_derive", + "bevy_ecs", + "bevy_hierarchy", + "bevy_log", + "bevy_math", + "bevy_reflect", + "bevy_render", + "bevy_time", + "bevy_transform", + "bevy_utils", + "blake3", + "fixedbitset 0.5.7", + "petgraph", + "ron", + "serde", + "thiserror", + "thread_local", + "uuid", ] [[package]] name = "bevy_app" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bce3544afc010ffed39c136f6d5a9322d20d38df1394d468ba9106caa0434cb" -dependencies = [ - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_reflect 0.13.0", - "bevy_tasks 0.13.0", - "bevy_utils 0.13.0", - "downcast-rs", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "bevy_asset" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "935984568f75867dd7357133b06f4b1502cd2be55e4642d483ce597e46e63bff" +checksum = "6f548e9dab7d10c5f99e3b504c758c4bf87aa67df9bcb9cc8b317a0271770e72" dependencies = [ - "async-broadcast", - "async-fs 1.6.0", - "async-lock 2.8.0", - "bevy_app 0.12.1", - "bevy_asset_macros 0.12.1", - "bevy_ecs 0.12.1", - "bevy_log 0.12.1", - "bevy_reflect 0.12.1", - "bevy_tasks 0.12.1", - "bevy_utils 0.12.1", - "bevy_winit 0.12.1", - "blake3", - "crossbeam-channel", + "bevy_derive", + "bevy_ecs", + "bevy_reflect", + "bevy_tasks", + "bevy_utils", + "console_error_panic_hook", "downcast-rs", - "futures-io", - "futures-lite 1.13.0", - "js-sys", - "parking_lot", - "ron", - "serde", "thiserror", "wasm-bindgen", - "wasm-bindgen-futures", "web-sys", ] [[package]] name = "bevy_asset" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac185d8e29c7eb0194f8aae7af3f7234f7ca7a448293be1d3d0d8fef435f65ec" +checksum = "f9d198e4c3419215de2ad981d4e734bbfab46469b7575e3b7150c912b9ec5175" dependencies = [ "async-broadcast", - "async-fs 2.1.1", - "async-lock 3.3.0", - "bevy_app 0.13.0", - "bevy_asset_macros 0.13.0", - "bevy_ecs 0.13.0", - "bevy_log 0.13.0", - "bevy_reflect 0.13.0", - "bevy_tasks 0.13.0", - "bevy_utils 0.13.0", - "bevy_winit 0.13.0", + "async-fs", + "async-lock", + "bevy_app", + "bevy_asset_macros", + "bevy_ecs", + "bevy_reflect", + "bevy_tasks", + 
"bevy_utils", + "bevy_winit", "blake3", "crossbeam-channel", "downcast-rs", "futures-io", - "futures-lite 2.2.0", + "futures-lite", "js-sys", "notify-debouncer-full", "parking_lot", "ron", "serde", "thiserror", + "uuid", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -606,962 +499,539 @@ dependencies = [ [[package]] name = "bevy_asset_macros" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f48b9bbe4ec605e4910b5cd1e1a0acbfbe0b80af5f3bcc4489a9fdd1e80058c" -dependencies = [ - "bevy_macro_utils 0.12.1", - "proc-macro2", - "quote", - "syn 2.0.50", -] - -[[package]] -name = "bevy_asset_macros" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb82d1aac8251378c45a8d0ad788d1bf75f54db28c1750f84f1fd7c00127927a" +checksum = "11b2cbeba287a4b44e116c33dbaf37dce80a9d84477b2bb35ff459999d6c9e1b" dependencies = [ - "bevy_macro_utils 0.13.0", + "bevy_macro_utils", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "bevy_audio" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fe7f952e5e0a343fbde43180db7b8e719ad78594480c91b26876623944a3a1" -dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "oboe", +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e41ecf15d0aae31bdb6d2b5cc590f966451e9736ddfee634c8f1ca5af1ac4342" +dependencies = [ + "bevy_app", + "bevy_asset", + "bevy_derive", + "bevy_ecs", + "bevy_hierarchy", + "bevy_math", + "bevy_reflect", + "bevy_transform", + "bevy_utils", + "cpal", "rodio", ] [[package]] -name = "bevy_core" -version = "0.12.1" +name = "bevy_color" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3daa24502a14839509f02407bc7e48299fe84d260877de23b60662de0f4f4b6c" +checksum = "5a933306f5c7dc9568209180f482b28b5f40d2f8d5b361bc1b270c0a588752c0" dependencies = [ - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_tasks 0.12.1", - "bevy_utils 0.12.1", + "bevy_math", + "bevy_reflect", "bytemuck", + "encase", + "serde", + "thiserror", + "wgpu-types", ] [[package]] name = "bevy_core" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b1b340b8d08f48ecd51b97589d261f5023a7b073d25e300382c49146524103" +checksum = "6ddeed5ebf2fa75a4d4f32e2da9c60f11037e36252695059a151c6685cd3d72b" dependencies = [ - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_tasks 0.13.0", - "bevy_utils 0.13.0", - "bytemuck", + "bevy_app", + "bevy_ecs", + "bevy_reflect", + "bevy_tasks", + "bevy_utils", + "uuid", ] [[package]] name = "bevy_core_pipeline" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4b77c4fca6e90edbe2e72da7bc9aa7aed7dfdfded0920ae0a0c845f5e11084a" -dependencies = [ - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_log 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "bitflags 2.4.2", - "radsort", - "serde", -] - -[[package]] -name = "bevy_core_pipeline" -version = "0.13.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "626a5aaadbdd69eae020c5856575d2d0113423ae1ae1351377e20956d940052c" -dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "bitflags 2.4.2", +checksum = "1b978220b5edc98f2c5cbbd14c118c74b3ec7216e5416d3c187c1097279b009b" +dependencies = [ + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core", + "bevy_derive", + "bevy_ecs", + "bevy_math", + "bevy_reflect", + "bevy_render", + "bevy_transform", + "bevy_utils", + "bitflags 2.6.0", + "nonmax", "radsort", "serde", + "smallvec", + "thiserror", ] [[package]] name = "bevy_derive" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f484318350462c58ba3942a45a656c1fd6b6e484a6b6b7abc3a787ad1a51e500" -dependencies = [ - "bevy_macro_utils 0.12.1", - "quote", - "syn 2.0.50", -] - -[[package]] -name = "bevy_derive" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028ae2a34678055185d7f1beebb1ebe6a8dcf3733e139e4ee1383a7f29ae8ba6" +checksum = "c8a8173bad3ed53fa158806b1beda147263337d6ef71a093780dd141b74386b1" dependencies = [ - "bevy_macro_utils 0.13.0", + "bevy_macro_utils", "quote", - "syn 2.0.50", -] - -[[package]] -name = "bevy_diagnostic" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa38ca5967d335cc1006a0e0f1a86c350e2f15fd1878449f61d04cd57a7c4060" -dependencies = [ - "bevy_app 0.12.1", - "bevy_core 0.12.1", - "bevy_ecs 0.12.1", - "bevy_log 0.12.1", - "bevy_time 0.12.1", - "bevy_utils 0.12.1", - "sysinfo 0.29.11", + "syn 2.0.68", ] [[package]] name = "bevy_diagnostic" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a104acfdc5280accd01a3524810daf3bda72924e3da0c8a9ec816a57eef4e3" +checksum = "0b7f82011fd70048be282526a99756d54bf00e874edafa9664ba0dc247678f03" dependencies = [ - "bevy_app 0.13.0", - "bevy_core 0.13.0", - "bevy_ecs 0.13.0", - "bevy_log 0.13.0", - "bevy_time 0.13.0", - "bevy_utils 0.13.0", + "bevy_app", + "bevy_core", + "bevy_ecs", + "bevy_tasks", + "bevy_time", + "bevy_utils", "const-fnv1a-hash", - "sysinfo 0.30.5", + "sysinfo", ] [[package]] name = "bevy_dylib" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3b3b76f0d7a4da8f944e5316f2d2d2af3bbb40d87508355993ea69afbc9411c" -dependencies = [ - "bevy_internal 0.13.0", -] - -[[package]] -name = "bevy_ecs" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709fbd22f81fb681534cd913c41e1cd18b17143368743281195d7f024b61aea" +checksum = "f8494bf550eb30f570da1563217bcea25530cf29b35d35887ca6c2d76a411d00" dependencies = [ - "async-channel 1.9.0", - "bevy_ecs_macros 0.12.1", - "bevy_ptr 0.12.1", - "bevy_reflect 0.12.1", - "bevy_tasks 0.12.1", - "bevy_utils 0.12.1", - "downcast-rs", - "event-listener 2.5.3", - "fixedbitset", - "rustc-hash", - "serde", - "thiserror", - "thread_local", + "bevy_internal", ] [[package]] name = "bevy_ecs" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b85406d5febbbdbcac4444ef61cd9a816f2f025ed692a3fc5439a32153070304" +checksum = 
"2c77fdc3a7230eff2fcebe4bd17c155bd238c660a0089d0f98c39ba0d461b923" dependencies = [ - "async-channel 2.2.0", - "bevy_ecs_macros 0.13.0", - "bevy_ptr 0.13.0", - "bevy_reflect 0.13.0", - "bevy_tasks 0.13.0", - "bevy_utils 0.13.0", - "downcast-rs", - "fixedbitset", - "rustc-hash", + "arrayvec", + "bevy_ecs_macros", + "bevy_ptr", + "bevy_reflect", + "bevy_tasks", + "bevy_utils", + "bitflags 2.6.0", + "concurrent-queue", + "fixedbitset 0.5.7", + "nonmax", + "petgraph", "serde", "thiserror", - "thread_local", -] - -[[package]] -name = "bevy_ecs_macros" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8843aa489f159f25cdcd9fee75cd7d221a7098a71eaa72cb2d6b40ac4e3f1ba" -dependencies = [ - "bevy_macro_utils 0.12.1", - "proc-macro2", - "quote", - "syn 2.0.50", ] [[package]] name = "bevy_ecs_macros" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3ce4b65d7c5f1990e729df75cec2ea6e2241b4a0c37b31c281a04c59c11b7b" +checksum = "9272b511958525306cd141726d3ca59740f79fc0707c439b55a007bcc3497308" dependencies = [ - "bevy_macro_utils 0.13.0", + "bevy_macro_utils", "proc-macro2", "quote", - "syn 2.0.50", -] - -[[package]] -name = "bevy_editor_pls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "170e3ee3293f70be7fa1b82e8ffe52dcb190c68b2f0ef4847afe65626b417ad7" -dependencies = [ - "bevy 0.12.1", - "bevy_editor_pls_core", - "bevy_editor_pls_default_windows", - "egui 0.23.0", - "egui-gizmo", -] - -[[package]] -name = "bevy_editor_pls_core" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9bab331de653d7d976a1fb0ab3ae8eff1dc821c5991c09bdbe945e70234839c" -dependencies = [ - "bevy 0.12.1", - "bevy-inspector-egui", - "egui_dock", - "indexmap 2.2.3", -] - -[[package]] -name = "bevy_editor_pls_default_windows" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b833e3b3d5c69b92527e766b470b27cb4745406e2087be19be81b595cc48792c" -dependencies = [ - "bevy 0.12.1", - "bevy-inspector-egui", - "bevy_editor_pls_core", - "bevy_mod_debugdump", - "egui-gizmo", - "indexmap 2.2.3", - "opener", - "pretty-type-name", + "syn 2.0.68", ] [[package]] name = "bevy_egui" -version = "0.23.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85450af551b7e1cb766f710763b60a12a82ffd6323945a8f776c6334c59ccdc1" +checksum = "5e4a90f30f2849a07d91e393b10c0cc05df09b5773c010ddde57dd8b583be230" dependencies = [ "arboard", - "bevy 0.12.1", - "egui 0.23.0", - "thread_local", - "webbrowser", -] - -[[package]] -name = "bevy_egui" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bfb8d4104a1467910cf2090bc6a6d394ebde39c0dbc02397b45aa9ef88e80" -dependencies = [ - "arboard", - "bevy 0.13.0", - "egui 0.26.2", + "bevy", + "bytemuck", + "console_log", + "crossbeam-channel", + "egui", + "js-sys", + "log", "thread_local", + "wasm-bindgen", + "wasm-bindgen-futures", "web-sys", "webbrowser", + "winit", ] [[package]] name = "bevy_encase_derive" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5328a3715e933ebbff07d0e99528dc423c4f7a53590ed1ac19a120348b028990" -dependencies = [ - "bevy_macro_utils 0.12.1", - "encase_derive_impl 0.6.1", -] - -[[package]] -name = "bevy_encase_derive" -version = "0.13.0" +version = "0.14.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c3d301922e76b16819e17c8cc43b34e92c13ccd06ad19dfa3e52a91a0e13e5c" +checksum = "f0452d8254c8bfae4bff6caca2a8be3b0c1b2e1a72b93e9b9f6a21c8dff807e0" dependencies = [ - "bevy_macro_utils 0.13.0", - "encase_derive_impl 0.7.0", + "bevy_macro_utils", + "encase_derive_impl", ] [[package]] name = "bevy_gilrs" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96364a1875ee4545fcf825c78dc065ddb9a3b2a509083ef11142f9de0eb8aa17" +checksum = "fbad8e59470c3d5cf25aa8c48462c4cf6f0c6314538c68ab2f5cf393146f0fc2" dependencies = [ - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_input 0.13.0", - "bevy_log 0.13.0", - "bevy_time 0.13.0", - "bevy_utils 0.13.0", + "bevy_app", + "bevy_ecs", + "bevy_input", + "bevy_time", + "bevy_utils", "gilrs", "thiserror", ] [[package]] name = "bevy_gizmos" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db232274ddca2ae452eb2731b98267b795d133ddd14013121bc7daddde1c7491" -dependencies = [ - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core 0.12.1", - "bevy_core_pipeline 0.12.1", - "bevy_ecs 0.12.1", - "bevy_math 0.12.1", - "bevy_pbr 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_sprite 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", -] - -[[package]] -name = "bevy_gizmos" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdca80b7b4db340eb666d69374a0195b3935759120d0b990fcef8b27d0fb3680" +checksum = "bdbb0556f0c6e45f4a17aef9c708c06ebf15ae1bed4533d7eddb493409f9f025" dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core 0.13.0", - "bevy_core_pipeline 0.13.0", - "bevy_ecs 0.13.0", + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core_pipeline", + "bevy_ecs", "bevy_gizmos_macros", - "bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_pbr 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_sprite 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", + "bevy_math", + "bevy_pbr", + "bevy_reflect", + "bevy_render", + "bevy_sprite", + "bevy_time", + "bevy_transform", + "bevy_utils", + "bytemuck", ] [[package]] name = "bevy_gizmos_macros" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a949eb8b4538a6e4d875321cda2b63dc0fb0317cf18c8245ca5a32f24f6d26d" +checksum = "8ef351a4b6498c197d1317c62f46ba84b69fbde3dbeb57beb2e744bbe5b7c3e0" dependencies = [ - "bevy_macro_utils 0.13.0", + "bevy_macro_utils", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "bevy_gltf" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031d0c2a7c0353bb9ac08a5130e58b9a2de3cdaa3c31b5da00b22a9e4732a155" +checksum = "cfd7abeaf3f28afd1f8999c2169aa17b40a37ad11253cf7dd05017024b65adc6" dependencies = [ - "base64", + "base64 0.22.1", "bevy_animation", - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core 0.13.0", - "bevy_core_pipeline 0.13.0", - "bevy_ecs 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_pbr 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_scene 0.13.0", - "bevy_tasks 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core", + "bevy_core_pipeline", + "bevy_ecs", + "bevy_hierarchy", + "bevy_math", + "bevy_pbr", + 
"bevy_reflect", + "bevy_render", + "bevy_scene", + "bevy_tasks", + "bevy_transform", + "bevy_utils", "gltf", "percent-encoding", "serde", "serde_json", + "smallvec", "thiserror", ] [[package]] name = "bevy_hierarchy" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06bd477152ce2ae1430f5e0a4f19216e5785c22fee1ab23788b5982dc59d1a55" +checksum = "802eca6f341d19ade790ccfaba7044be4d823b708087eb5ac4c1f74e4ea0916a" dependencies = [ - "bevy_app 0.12.1", - "bevy_core 0.12.1", - "bevy_ecs 0.12.1", - "bevy_log 0.12.1", - "bevy_reflect 0.12.1", - "bevy_utils 0.12.1", + "bevy_app", + "bevy_core", + "bevy_ecs", + "bevy_reflect", + "bevy_utils", "smallvec", ] -[[package]] -name = "bevy_hierarchy" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9f9f843e43d921f07658c24eae74285efc7a335c87998596f3f365155320c69" -dependencies = [ - "bevy_app 0.13.0", - "bevy_core 0.13.0", - "bevy_ecs 0.13.0", - "bevy_log 0.13.0", - "bevy_reflect 0.13.0", - "bevy_utils 0.13.0", -] - -[[package]] -name = "bevy_input" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab9a599189b2a694c182d60cd52219dd9364f9892ff542d87799b8e45d9e6dc" -dependencies = [ - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_utils 0.12.1", - "thiserror", -] - [[package]] name = "bevy_input" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9cb5b2f3747ffb00cf7e3d6b52f7384476921cd31f0cfd3d1ddff31f83d9252" +checksum = "2d050f1433f48ca23f1ea078734ebff119a3f76eb7d221725ab0f1fd9f81230b" dependencies = [ - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_utils 0.13.0", + "bevy_app", + "bevy_ecs", + "bevy_math", + "bevy_reflect", + "bevy_utils", "smol_str", "thiserror", ] [[package]] name = "bevy_internal" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f124bece9831afd80897815231072d51bfe3ac58c6bb58eca8880963b6d0487c" -dependencies = [ - "bevy_a11y 0.12.1", - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core 0.12.1", - "bevy_core_pipeline 0.12.1", - "bevy_derive 0.12.1", - "bevy_diagnostic 0.12.1", - "bevy_ecs 0.12.1", - "bevy_gizmos 0.12.1", - "bevy_hierarchy 0.12.1", - "bevy_input 0.12.1", - "bevy_log 0.12.1", - "bevy_math 0.12.1", - "bevy_pbr 0.12.1", - "bevy_ptr 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_scene 0.12.1", - "bevy_sprite 0.12.1", - "bevy_tasks 0.12.1", - "bevy_text 0.12.1", - "bevy_time 0.12.1", - "bevy_transform 0.12.1", - "bevy_ui 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", - "bevy_winit 0.12.1", -] - -[[package]] -name = "bevy_internal" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7af89c7083830b1d65fcf0260c3d2537c397fe8ce871471b6e97198a4704f23e" +checksum = "8ddd2b23e44d3a1f8ae547cbee5b6661f8135cc456c5de206e8648789944e7a1" dependencies = [ - "bevy_a11y 0.13.0", + "bevy_a11y", "bevy_animation", - "bevy_app 0.13.0", - "bevy_asset 0.13.0", + "bevy_app", + "bevy_asset", "bevy_audio", - "bevy_core 0.13.0", - "bevy_core_pipeline 0.13.0", - "bevy_derive 0.13.0", - "bevy_diagnostic 0.13.0", - "bevy_ecs 0.13.0", + "bevy_color", + "bevy_core", + "bevy_core_pipeline", + "bevy_derive", + "bevy_diagnostic", + "bevy_ecs", "bevy_gilrs", - "bevy_gizmos 0.13.0", + 
"bevy_gizmos", "bevy_gltf", - "bevy_hierarchy 0.13.0", - "bevy_input 0.13.0", - "bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_pbr 0.13.0", - "bevy_ptr 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_scene 0.13.0", - "bevy_sprite 0.13.0", - "bevy_tasks 0.13.0", - "bevy_text 0.13.0", - "bevy_time 0.13.0", - "bevy_transform 0.13.0", - "bevy_ui 0.13.0", - "bevy_utils 0.13.0", - "bevy_window 0.13.0", - "bevy_winit 0.13.0", + "bevy_hierarchy", + "bevy_input", + "bevy_log", + "bevy_math", + "bevy_pbr", + "bevy_ptr", + "bevy_reflect", + "bevy_render", + "bevy_scene", + "bevy_sprite", + "bevy_state", + "bevy_tasks", + "bevy_text", + "bevy_time", + "bevy_transform", + "bevy_ui", + "bevy_utils", + "bevy_window", + "bevy_winit", ] [[package]] name = "bevy_log" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc10ba1d225a8477b9e80a1bf797d8a8b8274e83c9b24fb4d9351aec9229755" -dependencies = [ - "android_log-sys", - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_utils 0.12.1", - "console_error_panic_hook", - "tracing-log 0.1.4", - "tracing-subscriber", - "tracing-wasm", -] - -[[package]] -name = "bevy_log" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd5bcc3531f8008897fb03cc8751b86d0d29ef94f8fd38b422f9603b7ae80d0" +checksum = "bab641fd0de254915ab746165a07677465b2d89b72f5b49367d73b9197548a35" dependencies = [ "android_log-sys", - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_utils 0.13.0", - "console_error_panic_hook", - "tracing-log 0.1.4", + "bevy_app", + "bevy_ecs", + "bevy_utils", + "tracing-log", "tracing-subscriber", "tracing-wasm", ] [[package]] name = "bevy_macro_utils" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e566640c6b6dced73d2006c764c2cffebe1a82be4809486c4a5d7b4b50efed4d" -dependencies = [ - "proc-macro2", - "quote", - "rustc-hash", - "syn 2.0.50", - "toml_edit 0.20.7", -] - -[[package]] -name = "bevy_macro_utils" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac4401c25b197e7c1455a4875a90b61bba047a9e8d290ce029082c818ab1a21c" +checksum = "c3ad860d35d74b35d4d6ae7f656d163b6f475aa2e64fc293ee86ac901977ddb7" dependencies = [ "proc-macro2", "quote", - "rustc-hash", - "syn 2.0.50", - "toml_edit 0.21.1", + "syn 2.0.68", + "toml_edit 0.22.14", ] [[package]] name = "bevy_math" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ddc2b76783939c530178f88e5711a1b01044d7b02db4033e2eb8b43b6cf4ec" -dependencies = [ - "glam 0.24.2", - "serde", -] - -[[package]] -name = "bevy_math" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f312b1b8aa6d3965b65040b08e33efac030db3071f20b44f9da9c4c3dfcaf76" -dependencies = [ - "glam 0.25.0", - "serde", -] - -[[package]] -name = "bevy_mikktspace" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ec4962977a746d870170532fc92759e04d3dbcae8b7b82e7ca3bb83b1d75277" +checksum = "51bd6ce2174d3237d30e0ab5b2508480cc7593ca4d96ffb3a3095f9fc6bbc34c" dependencies = [ - "glam 0.24.2", + "bevy_reflect", + "glam", + "rand", + "smallvec", + "thiserror", ] [[package]] name = "bevy_mikktspace" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3075c01f2b1799945892d5310fc1836e47c045dfe6af5878a304a475931a0c5f" 
-dependencies = [ - "glam 0.25.0", -] - -[[package]] -name = "bevy_mod_debugdump" -version = "0.9.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4db8601f41ea570b7d32f3177292a608196c59bdf3298001a9e202d5e7439438" +checksum = "b7ce4266293629a2d10459cc112dffe3b3e9229a4f2b8a4d20061b8dd53316d0" dependencies = [ - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_render 0.12.1", - "bevy_utils 0.12.1", - "once_cell", - "petgraph", - "pretty-type-name", + "glam", ] [[package]] name = "bevy_panorbit_camera" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "981580750475c8d23060d8cb66ce5762d32849ad3a02feeb5e0dc5f410973106" +version = "0.18.2" +source = "git+https://github.com/kristoff3r/bevy_panorbit_camera#cf5cd260c79ede7ad4a5711c677deb5c47b72d5f" dependencies = [ - "bevy 0.13.0", + "bevy", ] [[package]] name = "bevy_pbr" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520bfd2a898c74f84ea52cfb8eb061f37373ad15e623489d5f75d27ebd6138fe" -dependencies = [ - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core_pipeline 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", - "bitflags 2.4.2", +checksum = "3effe8ff28899f14d250d0649ca9868dbe68b389d0f2b7af086759b8e16c6e3d" +dependencies = [ + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core_pipeline", + "bevy_derive", + "bevy_ecs", + "bevy_math", + "bevy_reflect", + "bevy_render", + "bevy_transform", + "bevy_utils", + "bevy_window", + "bitflags 2.6.0", "bytemuck", - "fixedbitset", - "naga_oil 0.10.1", - "radsort", - "smallvec", - "thread_local", -] - -[[package]] -name = "bevy_pbr" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31c72bf12e50ff76c9ed9a7c51ceb88bfea9865d00f24d95b12344fffe1e270" -dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core_pipeline 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "bevy_window 0.13.0", - "bitflags 2.4.2", - "bytemuck", - "fixedbitset", + "fixedbitset 0.5.7", + "nonmax", "radsort", "smallvec", - "thread_local", + "static_assertions", ] -[[package]] -name = "bevy_ptr" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77ec20c8fafcdc196508ef5ccb4f0400a8d193cb61f7b14a36ed9a25ad423cf" - -[[package]] -name = "bevy_ptr" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86afa4a88ee06b10fe1e6f28a796ba2eedd16804717cbbb911df0cbb0cd6677b" - -[[package]] -name = "bevy_reflect" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7921f15fc944c9c8ad01d7dbcea6505b8909c6655cd9382bab1407181556038" -dependencies = [ - "bevy_math 0.12.1", - "bevy_ptr 0.12.1", - "bevy_reflect_derive 0.12.1", - "bevy_utils 0.12.1", - "downcast-rs", - "erased-serde 0.3.31", - "glam 0.24.2", - "serde", - "smallvec", - "smol_str", - "thiserror", -] +[[package]] +name = "bevy_ptr" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c115c97a5c8a263bd0aa7001b999772c744ac5ba797d07c86f25734ce381ea69" [[package]] name = "bevy_reflect" -version = "0.13.0" +version 
= "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "133dfab8d403d0575eeed9084e85780bbb449dcf75dd687448439117789b40a2" +checksum = "406ea0fce267169c2320c7302d97d09f605105686346762562c5f65960b5ca2f" dependencies = [ - "bevy_math 0.13.0", - "bevy_ptr 0.13.0", - "bevy_reflect_derive 0.13.0", - "bevy_utils 0.13.0", + "bevy_ptr", + "bevy_reflect_derive", + "bevy_utils", "downcast-rs", - "erased-serde 0.4.3", - "glam 0.25.0", + "erased-serde", + "glam", + "petgraph", "serde", + "smallvec", "smol_str", "thiserror", -] - -[[package]] -name = "bevy_reflect_derive" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4a8c5475f216e751ef4452a1306b00711f33d2d04d9f149e4c845dfeb6753a0" -dependencies = [ - "bevy_macro_utils 0.12.1", - "proc-macro2", - "quote", - "syn 2.0.50", "uuid", ] [[package]] name = "bevy_reflect_derive" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce1679a4dfdb2c9ff24ca590914c3cec119d7c9e1b56fa637776913acc030386" +checksum = "0427fdb4425fc72cc96d45e550df83ace6347f0503840de116c76a40843ba751" dependencies = [ - "bevy_macro_utils 0.13.0", + "bevy_macro_utils", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", "uuid", ] [[package]] name = "bevy_render" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdefdd3737125b0d94a6ff20bb70fa8cfe9d7d5dcd72ba4dfe6c5f1d30d9f6e4" -dependencies = [ - "async-channel 1.9.0", - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_encase_derive 0.12.1", - "bevy_hierarchy 0.12.1", - "bevy_log 0.12.1", - "bevy_math 0.12.1", - "bevy_mikktspace 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render_macros 0.12.1", - "bevy_tasks 0.12.1", - "bevy_time 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", - "bitflags 2.4.2", - "bytemuck", - "codespan-reporting", - "downcast-rs", - "encase 0.6.1", - "futures-lite 1.13.0", - "hexasphere 9.1.0", - "image", - "js-sys", - "naga 0.13.0", - "naga_oil 0.10.1", - "serde", - "smallvec", - "thiserror", - "thread_local", - "wasm-bindgen", - "web-sys", - "wgpu 0.17.2", -] - -[[package]] -name = "bevy_render" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b194b7029b7541ef9206ac3cb696d3cb37f70bd3260d293fc00d378547e892" -dependencies = [ - "async-channel 2.2.0", - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_encase_derive 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_mikktspace 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render_macros 0.13.0", - "bevy_tasks 0.13.0", - "bevy_time 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "bevy_window 0.13.0", - "bitflags 2.4.2", +checksum = "4c48acf1ff4267c231def4cbf573248d42ac60c9952108822d505019460bf36d" +dependencies = [ + "async-channel", + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core", + "bevy_derive", + "bevy_diagnostic", + "bevy_ecs", + "bevy_encase_derive", + "bevy_hierarchy", + "bevy_math", + "bevy_mikktspace", + "bevy_reflect", + "bevy_render_macros", + "bevy_tasks", + "bevy_time", + "bevy_transform", + "bevy_utils", + "bevy_window", + "bitflags 2.6.0", "bytemuck", "codespan-reporting", "downcast-rs", - "encase 0.7.0", - "futures-lite 2.2.0", - "hexasphere 10.0.0", + "encase", + 
"futures-lite", + "hexasphere", "image", "js-sys", "ktx2", - "naga 0.19.0", - "naga_oil 0.13.0", + "naga", + "naga_oil", + "nonmax", "ruzstd", + "send_wrapper", "serde", + "smallvec", "thiserror", - "thread_local", "wasm-bindgen", "web-sys", - "wgpu 0.19.1", -] - -[[package]] -name = "bevy_render_macros" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d86bfc5a1e7fbeeaec0c4ceab18155530f5506624670965db3415f75826bea" -dependencies = [ - "bevy_macro_utils 0.12.1", - "proc-macro2", - "quote", - "syn 2.0.50", + "wgpu", ] [[package]] name = "bevy_render_macros" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aa6d99b50375bb7f63be2c3055dfe2f926f7b3c4db108bb0b1181b4f02766aa" +checksum = "72ddf4a96d71519c8eca3d74dabcb89a9c0d50ab5d9230638cb004145f46e9ed" dependencies = [ - "bevy_macro_utils 0.13.0", + "bevy_macro_utils", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "bevy_scene" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7df078b5e406e37c8a1c6ba0d652bf105fde713ce3c3efda7263fe27467eee5" -dependencies = [ - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_hierarchy 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "ron", - "serde", - "thiserror", - "uuid", -] - -[[package]] -name = "bevy_scene" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3c82eaff0b22949183a75a7e2d7fc4ece808235918b34c5b282aab52c3563a" -dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", +checksum = "b7a9f0388612a116f02ab6187aeab66e52c9e91abbc21f919b8b50230c4d83e7" +dependencies = [ + "bevy_app", + "bevy_asset", + "bevy_derive", + "bevy_ecs", + "bevy_hierarchy", + "bevy_reflect", + "bevy_render", + "bevy_transform", + "bevy_utils", "serde", "thiserror", "uuid", @@ -1569,24 +1039,24 @@ dependencies = [ [[package]] name = "bevy_sprite" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cc0c9d946e17e3e0aaa202f182837bc796c4f862b2e5a805134f873f21cf7f" -dependencies = [ - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core_pipeline 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_log 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "bitflags 2.4.2", +checksum = "d837e33ed27b9f2e5212eca4bdd5655a9ee64c52914112e6189c043cb25dd1ec" +dependencies = [ + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core_pipeline", + "bevy_derive", + "bevy_ecs", + "bevy_math", + "bevy_reflect", + "bevy_render", + "bevy_transform", + "bevy_utils", + "bitflags 2.6.0", "bytemuck", - "fixedbitset", + "fixedbitset 0.5.7", "guillotiere", "radsort", "rectangle-pack", @@ -1594,98 +1064,62 @@ dependencies = [ ] [[package]] -name = "bevy_sprite" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea977d7d7c48fc4ba283d449f09528c4e70db17c9048e32e99ecd9890d72223" -dependencies = [ - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core_pipeline 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - 
"bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "bitflags 2.4.2", - "bytemuck", - "fixedbitset", - "guillotiere", - "radsort", - "rectangle-pack", - "thiserror", +name = "bevy_state" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0959984092d56885fd3b320ea84fb816821bad6bfa3040b9d4ee850d3273233d" +dependencies = [ + "bevy_app", + "bevy_ecs", + "bevy_hierarchy", + "bevy_reflect", + "bevy_state_macros", + "bevy_utils", ] [[package]] -name = "bevy_tasks" -version = "0.12.1" +name = "bevy_state_macros" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fefa7fe0da8923525f7500e274f1bd60dbd79918a25cf7d0dfa0a6ba15c1cf" +checksum = "887a98bfa268258377cd073f5bb839518d3a1cd6b96ed81418145485b69378e6" dependencies = [ - "async-channel 1.9.0", - "async-executor", - "async-task", - "concurrent-queue", - "futures-lite 1.13.0", - "wasm-bindgen-futures", + "bevy_macro_utils", + "proc-macro2", + "quote", + "syn 2.0.68", ] [[package]] name = "bevy_tasks" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b20f243f6fc4c4ba10c2dbff891e947ddae947bb20b263f43e023558b35294bd" +checksum = "5a8bfb8d484bdb1e9bec3789c75202adc5e608c4244347152e50fb31668a54f9" dependencies = [ - "async-channel 2.2.0", + "async-channel", "async-executor", - "async-task", "concurrent-queue", - "futures-lite 2.2.0", + "futures-lite", "wasm-bindgen-futures", ] [[package]] name = "bevy_text" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9a79d49ca06170d69149949b134c14e8b99ace1444c1ca2cd4743b19d5b055" -dependencies = [ - "ab_glyph", - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_ecs 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_sprite 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", - "glyph_brush_layout", - "serde", - "thiserror", -] - -[[package]] -name = "bevy_text" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006990d27551dbc339774178e833290952511621662fd5ca23a4e6e922ab2d9f" +checksum = "454fd29b7828244356b2e0ce782e6d0a6f26b47f521456accde3a7191b121727" dependencies = [ "ab_glyph", - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_ecs 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_sprite 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "bevy_window 0.13.0", + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_ecs", + "bevy_math", + "bevy_reflect", + "bevy_render", + "bevy_sprite", + "bevy_transform", + "bevy_utils", + "bevy_window", "glyph_brush_layout", "serde", "thiserror", @@ -1693,258 +1127,130 @@ dependencies = [ [[package]] name = "bevy_time" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6250d76eed3077128b6a3d004f9f198b01107800b9824051e32bb658054e837" -dependencies = [ - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_reflect 0.12.1", - "bevy_utils 0.12.1", - "crossbeam-channel", - "thiserror", -] - -[[package]] -name = "bevy_time" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9738901b6b251d2c9250542af7002d6f671401fc3b74504682697c5ec822f210" +checksum = 
"a6c3d3d14ee8b0dbe4819fd516cc75509b61946134d78e0ee89ad3d1835ffe6c" dependencies = [ - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_reflect 0.13.0", - "bevy_utils 0.13.0", + "bevy_app", + "bevy_ecs", + "bevy_reflect", + "bevy_utils", "crossbeam-channel", "thiserror", ] [[package]] name = "bevy_transform" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d541e0c292edbd96afae816ee680e02247422423ccd5dc635c1e211a20ed64be" -dependencies = [ - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_hierarchy 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "thiserror", -] - -[[package]] -name = "bevy_transform" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73744a95bc4b8683e91cea3e79b1ad0844c1d677f31fbbc1814c79a5b4f8f0" +checksum = "97e8aa6b16be573277c6ceda30aebf1d78af7c6ede19b448dcb052fb8601d815" dependencies = [ - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", + "bevy_app", + "bevy_ecs", + "bevy_hierarchy", + "bevy_math", + "bevy_reflect", "thiserror", ] [[package]] name = "bevy_ui" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d785e3b75dabcb2a8ad0d50933f8f3446d59e512cabc2d2a145e28c2bb8792ba" -dependencies = [ - "bevy_a11y 0.12.1", - "bevy_app 0.12.1", - "bevy_asset 0.12.1", - "bevy_core_pipeline 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_hierarchy 0.12.1", - "bevy_input 0.12.1", - "bevy_log 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_render 0.12.1", - "bevy_sprite 0.12.1", - "bevy_text 0.12.1", - "bevy_transform 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", +checksum = "38d9f864c646f3742ff77f67bcd89a13a7ab024b68ca2f1bfbab8245bcb1c06c" +dependencies = [ + "bevy_a11y", + "bevy_app", + "bevy_asset", + "bevy_color", + "bevy_core_pipeline", + "bevy_derive", + "bevy_ecs", + "bevy_hierarchy", + "bevy_input", + "bevy_math", + "bevy_reflect", + "bevy_render", + "bevy_sprite", + "bevy_text", + "bevy_transform", + "bevy_utils", + "bevy_window", "bytemuck", - "serde", + "nonmax", "smallvec", "taffy", "thiserror", ] -[[package]] -name = "bevy_ui" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fafe872906bac6d7fc8ecff166f56b4253465b2895ed88801499aa113548ccc6" -dependencies = [ - "bevy_a11y 0.13.0", - "bevy_app 0.13.0", - "bevy_asset 0.13.0", - "bevy_core_pipeline 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_input 0.13.0", - "bevy_log 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_render 0.13.0", - "bevy_sprite 0.13.0", - "bevy_text 0.13.0", - "bevy_transform 0.13.0", - "bevy_utils 0.13.0", - "bevy_window 0.13.0", - "bytemuck", - "taffy", - "thiserror", -] - -[[package]] -name = "bevy_utils" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7915222f4a08ccc782e08d10b751b42e5f9d786e697d0cb3fd09333cb7e8b6ea" -dependencies = [ - "ahash", - "bevy_utils_proc_macros 0.12.1", - "getrandom", - "hashbrown 0.14.3", - "instant", - "nonmax", - "petgraph", - "thiserror", - "tracing", - "uuid", -] - [[package]] name = "bevy_utils" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a06aca1c1863606416b892f4c79e300dbc6211b6690953269051a431c2cca0" +checksum = 
"7fab364910e8f5839578aba9cfda00a8388e9ebe352ceb8491a742ce6af9ec6e" dependencies = [ "ahash", - "bevy_utils_proc_macros 0.13.0", + "bevy_utils_proc_macros", "getrandom", - "hashbrown 0.14.3", - "nonmax", - "petgraph", - "smallvec", - "thiserror", + "hashbrown", + "thread_local", "tracing", - "uuid", "web-time", ] [[package]] name = "bevy_utils_proc_macros" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aafecc952b6b8eb1a93c12590bd867d25df2f4ae1033a01dfdfc3c35ebccfff" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.50", -] - -[[package]] -name = "bevy_utils_proc_macros" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ae98e9c0c08b0f5c90e22cd713201f759b98d4fd570b99867a695f8641859a" +checksum = "ad9db261ab33a046e1f54b35f885a44f21fcc80aa2bc9050319466b88fe58fe3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "bevy_window" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ee72bf7f974000e9b31bb971a89387f1432ba9413f35c4fef59fef49767260" +checksum = "c9ea5777f933bf7ecaeb3af1a30845720ec730e007972ca7d4aba2d3512abe24" dependencies = [ - "bevy_a11y 0.12.1", - "bevy_app 0.12.1", - "bevy_ecs 0.12.1", - "bevy_input 0.12.1", - "bevy_math 0.12.1", - "bevy_reflect 0.12.1", - "bevy_utils 0.12.1", - "raw-window-handle 0.5.2", -] - -[[package]] -name = "bevy_window" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb627efd7622a61398ac0d3674f93c997cffe16f13c59fb8ae8a05c9e28de961" -dependencies = [ - "bevy_a11y 0.13.0", - "bevy_app 0.13.0", - "bevy_ecs 0.13.0", - "bevy_input 0.13.0", - "bevy_math 0.13.0", - "bevy_reflect 0.13.0", - "bevy_utils 0.13.0", - "raw-window-handle 0.6.0", + "bevy_a11y", + "bevy_app", + "bevy_ecs", + "bevy_math", + "bevy_reflect", + "bevy_utils", + "raw-window-handle", "smol_str", ] [[package]] name = "bevy_winit" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eb71f287eca9006dda998784c7b931e400ae2cc4c505da315882a8b082f21ad" -dependencies = [ - "accesskit_winit 0.15.0", - "approx", - "bevy_a11y 0.12.1", - "bevy_app 0.12.1", - "bevy_derive 0.12.1", - "bevy_ecs 0.12.1", - "bevy_hierarchy 0.12.1", - "bevy_input 0.12.1", - "bevy_math 0.12.1", - "bevy_tasks 0.12.1", - "bevy_utils 0.12.1", - "bevy_window 0.12.1", - "crossbeam-channel", - "raw-window-handle 0.5.2", - "wasm-bindgen", - "web-sys", - "winit 0.28.7", -] - -[[package]] -name = "bevy_winit" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55105324a201941ae587790f83f6d9caa327e0baa0205558ec41e5ee05a1f703" +checksum = "f8c2213bbf14debe819ec8ad4913f233c596002d087bc6f1f20d533e2ebaf8c6" dependencies = [ - "accesskit_winit 0.17.0", + "accesskit_winit", "approx", - "bevy_a11y 0.13.0", - "bevy_app 0.13.0", - "bevy_derive 0.13.0", - "bevy_ecs 0.13.0", - "bevy_hierarchy 0.13.0", - "bevy_input 0.13.0", - "bevy_math 0.13.0", - "bevy_tasks 0.13.0", - "bevy_utils 0.13.0", - "bevy_window 0.13.0", + "bevy_a11y", + "bevy_app", + "bevy_derive", + "bevy_ecs", + "bevy_hierarchy", + "bevy_input", + "bevy_log", + "bevy_math", + "bevy_reflect", + "bevy_tasks", + "bevy_utils", + "bevy_window", + "cfg-if", "crossbeam-channel", - "raw-window-handle 0.6.0", + "raw-window-handle", "wasm-bindgen", "web-sys", - "winit 0.29.10", + "winit", ] [[package]] 
@@ -1953,7 +1259,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools", @@ -1964,7 +1270,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -1996,18 +1302,24 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "serde", ] +[[package]] +name = "bitstream-io" +version = "2.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "415f8399438eb5e4b2f73ed3152a3448b98149dda642a957ee704e1daa5cf1d8" + [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", @@ -2022,95 +1334,58 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" -[[package]] -name = "block-sys" -version = "0.1.0-beta.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa55741ee90902547802152aaf3f8e5248aab7e21468089560d4c8840561146" -dependencies = [ - "objc-sys 0.2.0-beta.2", -] - -[[package]] -name = "block-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae85a0696e7ea3b835a453750bf002770776609115e6d25c6d2ff28a8200f7e7" -dependencies = [ - "objc-sys 0.3.2", -] - -[[package]] -name = "block2" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd9e63c1744f755c2f60332b88de39d341e5e86239014ad839bd71c106dec42" -dependencies = [ - "block-sys 0.1.0-beta.1", - "objc2-encode 2.0.0-pre.2", -] - [[package]] name = "block2" -version = "0.3.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b55663a85f33501257357e6421bb33e769d5c9ffb5ba0921c975a123e35e68" +checksum = "2c132eebf10f5cad5289222520a4a058514204aed6d791f1cf4fe8088b82d15f" dependencies = [ - "block-sys 0.2.1", - "objc2 0.4.1", + "objc2", ] [[package]] name = "blocking" -version = "1.5.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 2.2.0", - "async-lock 3.3.0", + "async-channel", "async-task", - "fastrand 2.0.1", "futures-io", - "futures-lite 2.2.0", + "futures-lite", "piper", - "tracing", ] [[package]] -name = "bstr" -version = "1.9.1" +name = "built" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" -dependencies = [ - "memchr", - "regex-automata 0.4.5", - "serde", -] +checksum = "c6a6c0b39c38fd754ac338b00a88066436389c0f029da5d37d1e01091d9b7c17" [[package]] name = 
"bumpalo" -version = "3.15.3" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.14.3" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f" +checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.5.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965ab7eb5f8f97d2a083c799f3a1b994fc397b2fe2da5d1da1626ce15a39f2b1" +checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -2119,11 +1394,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +[[package]] +name = "byteorder-lite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" + [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "calloop" @@ -2131,7 +1412,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fba7adb4dd5aa98e5553510223000e7148f621165ec5f9acd7113f6ca4995298" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "log", "polling", "rustix", @@ -2153,11 +1434,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.88" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" +checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" dependencies = [ + "jobserver", "libc", + "once_cell", ] [[package]] @@ -2175,6 +1458,16 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +dependencies = [ + "smallvec", + "target-lexicon", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -2187,29 +1480,35 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.3", + "windows-targets 0.52.6", ] [[package]] 
name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", - "libloading 0.8.1", + "libloading 0.8.4", ] [[package]] @@ -2224,9 +1523,9 @@ dependencies = [ [[package]] name = "clipboard-win" -version = "5.2.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f9a0700e0127ba15d1d52dd742097f821cd9c65939303a44d970465040a297" +checksum = "79f4473f5144e20d9aceaf2972478f06ddf687831eafeeb434fbaf0acc4144ad" dependencies = [ "error-code", ] @@ -2256,12 +1555,6 @@ dependencies = [ "com_macros", ] -[[package]] -name = "com-rs" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf43edc576402991846b093a7ca18a3477e0ef9c588cde84964b5d3e43016642" - [[package]] name = "com_macros" version = "0.6.0" @@ -2286,9 +1579,9 @@ dependencies = [ [[package]] name = "combine" -version = "4.6.6" +version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", "memchr", @@ -2296,9 +1589,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] @@ -2313,6 +1606,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "console_log" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be8aed40e4edbf4d3b4431ab260b63fdc40f5780a4766824329ea0f1eefe3c0f" +dependencies = [ + "log", + "web-sys", +] + [[package]] name = "const-fnv1a-hash" version = "1.1.0" @@ -2339,9 +1642,9 @@ checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "constgebra" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd23e864550e6dafc1e41ac78ce4f1ccddc8672b40c403524a04ff3f0518420" +checksum = "e1aaf9b65849a68662ac6c0810c8893a765c960b907dd7cfab9c4a50bf764fbc" dependencies = [ "const_soft_float", ] @@ -2371,29 +1674,16 @@ dependencies = [ ] [[package]] -name = "core-foundation-sys" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - -[[package]] -name = "core-graphics" -version = "0.22.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "core-graphics-types", - "foreign-types 0.3.2", - "libc", -] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core-graphics" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"970a29baf4110c26fedbc7f82107d42c23f7e88e404c4577ed73fe99ff85a212" +checksum = "c07782be35f9e1140080c6b96f0d44b739e2278479f64e02fdab4e32dfd8b081" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -2435,43 +1725,41 @@ dependencies = [ [[package]] name = "cpal" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d959d90e938c5493000514b446987c07aed46c668faaa7d34d6c7a67b1a578c" +checksum = "873dab07c8f743075e57f524c583985fbaf745602acbe916a01539364369a779" dependencies = [ "alsa", "core-foundation-sys", "coreaudio-rs", "dasp_sample", - "jni 0.19.0", + "jni", "js-sys", "libc", "mach2", - "ndk 0.7.0", + "ndk 0.8.0", "ndk-context", "oboe", - "once_cell", - "parking_lot", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows 0.46.0", + "windows 0.54.0", ] [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] @@ -2497,9 +1785,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -2515,23 +1803,12 @@ checksum = "96a6ac251f4a2aca6b3f91340350eab87ae57c3f127ffeb585e92bd336717991" [[package]] name = "d3d12" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16e44ab292b1dddfdaf7be62cfd8877df52f2f3fde5858d95bab606be259f20" -dependencies = [ - "bitflags 2.4.2", - "libloading 0.8.1", - "winapi", -] - -[[package]] -name = "d3d12" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e3d747f100290a1ca24b752186f61f6637e1deffe3bf6320de6fcb29510a307" +checksum = "b28bfe653d79bd16c77f659305b195b82bb5ce0c0eb2a4846b82ddbd77586813" dependencies = [ - "bitflags 2.4.2", - "libloading 0.8.1", + "bitflags 2.6.0", + "libloading 0.8.4", "winapi", ] @@ -2543,20 +1820,9 @@ checksum = "0c87e182de0887fd5361989c677c4e8f5000cd9491d6d563161a8f3a5519fc7f" [[package]] name = "data-encoding" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" - -[[package]] -name = "derive_more" -version = "0.99.17" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "directories" @@ -2591,209 +1857,119 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412" dependencies = [ - 
"libloading 0.8.1", + "libloading 0.8.4", ] [[package]] -name = "downcast-rs" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" - -[[package]] -name = "duplicate" -version = "1.0.0" +name = "document-features" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de78e66ac9061e030587b2a2e75cc88f22304913c907b11307bca737141230cb" +checksum = "ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95" dependencies = [ - "heck", - "proc-macro-error", + "litrs", ] [[package]] -name = "ecolor" -version = "0.23.0" +name = "downcast-rs" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf4e52dbbb615cfd30cf5a5265335c217b5fd8d669593cea74a517d9c605af" -dependencies = [ - "bytemuck", -] +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" [[package]] -name = "ecolor" -version = "0.26.2" +name = "dpi" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfe80b1890e1a8cdbffc6044d6872e814aaf6011835a2a5e2db0e5c5c4ef4e" -dependencies = [ - "bytemuck", -] +checksum = "f25c0e292a7ca6d6498557ff1df68f32c99850012b6ea401cf8daf771f22ff53" [[package]] -name = "egui" -version = "0.23.0" +name = "ecolor" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd69fed5fcf4fbb8225b24e80ea6193b61e17a625db105ef0c4d71dde6eb8b7" +checksum = "2e6b451ff1143f6de0f33fc7f1b68fecfd2c7de06e104de96c4514de3f5396f8" dependencies = [ - "ahash", - "epaint 0.23.0", - "nohash-hasher", + "bytemuck", + "emath", ] [[package]] name = "egui" -version = "0.26.2" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180f595432a5b615fc6b74afef3955249b86cfea72607b40740a4cd60d5297d0" +checksum = "20c97e70a2768de630f161bb5392cbd3874fcf72868f14df0e002e82e06cb798" dependencies = [ "ahash", - "epaint 0.26.2", + "emath", + "epaint", "nohash-hasher", ] -[[package]] -name = "egui-gizmo" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f732ad247afe275d6cf901e0f134025ad735007c8f4d82e667a6871f1b4a5441" -dependencies = [ - "egui 0.23.0", - "glam 0.24.2", -] - -[[package]] -name = "egui_dock" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a52f67bcab0eb6050cf8051c614966c1c57129fab23dbeae9c157214779053c7" -dependencies = [ - "duplicate", - "egui 0.23.0", - "paste", -] - [[package]] name = "either" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" - -[[package]] -name = "emath" -version = "0.23.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef2b29de53074e575c18b694167ccbe6e5191f7b25fe65175a0d905a32eeec0" -dependencies = [ - "bytemuck", -] +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "emath" -version = "0.26.2" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6916301ecf80448f786cdf3eb51d9dbdd831538732229d49119e2d4312eaaf09" +checksum = "0a6a21708405ea88f63d8309650b4d77431f4bc28fb9d8e6f77d3963b51249e6" dependencies = [ "bytemuck", ] [[package]] name = "encase" -version = "0.6.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fce2eeef77fd4a293a54b62aa00ac9daebfbcda4bf8998c5a815635b004aa1c" -dependencies = [ - "const_panic", - "encase_derive 0.6.1", - "glam 0.24.2", - "thiserror", -] - -[[package]] -name = "encase" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ed933078d2e659745df651f4c180511cd582e5b9414ff896e7d50d207e3103" +checksum = "5a9299a95fa5671ddf29ecc22b00e121843a65cb9ff24911e394b4ae556baf36" dependencies = [ "const_panic", - "encase_derive 0.7.0", - "glam 0.25.0", + "encase_derive", + "glam", "thiserror", ] [[package]] name = "encase_derive" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e520cde08cbf4f7cc097f61573ec06ce467019803de8ae82fb2823fa1554a0e" -dependencies = [ - "encase_derive_impl 0.6.1", -] - -[[package]] -name = "encase_derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4ce1449c7d19eba6cc0abd231150ad81620a8dce29601d7f8d236e5d431d72a" -dependencies = [ - "encase_derive_impl 0.7.0", -] - -[[package]] -name = "encase_derive_impl" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fe2568f851fd6144a45fa91cfed8fe5ca8fc0b56ba6797bfc1ed2771b90e37c" +checksum = "07e09decb3beb1fe2db6940f598957b2e1f7df6206a804d438ff6cb2a9cddc10" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.50", + "encase_derive_impl", ] [[package]] name = "encase_derive_impl" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92959a9e8d13eaa13b8ae8c7b583c3bf1669ca7a8e7708a088d12587ba86effc" +checksum = "fd31dbbd9743684d339f907a87fe212cb7b51d75b9e8e74181fe363199ee9b47" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] [[package]] name = "epaint" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58067b840d009143934d91d8dcb8ded054d8301d7c11a517ace0a99bb1e1595e" -dependencies = [ - "ab_glyph", - "ahash", - "bytemuck", - "ecolor 0.23.0", - "emath 0.23.0", - "nohash-hasher", - "parking_lot", -] - -[[package]] -name = "epaint" -version = "0.26.2" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b9fdf617dd7f58b0c8e6e9e4a1281f730cde0831d40547da446b2bb76a47af" +checksum = "3f0dcc0a0771e7500e94cd1cb797bd13c9f23b9409bdc3c824e2cbc562b7fa01" dependencies = [ "ab_glyph", "ahash", "bytemuck", - "ecolor 0.26.2", - "emath 0.26.2", + "ecolor", + "emath", "nohash-hasher", "parking_lot", ] @@ -2806,27 +1982,19 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "erased-serde" -version = "0.3.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" -dependencies = [ - "serde", -] - -[[package]] -name = "erased-serde" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"388979d208a049ffdfb22fa33b9c81942215b940910bccfe258caeb25d125cb3" +checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d" dependencies = [ "serde", + "typeid", ] [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2834,15 +2002,15 @@ dependencies = [ [[package]] name = "error-code" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26a147e1a6641a55d994b3e4e9fa4d9b180c8d652c09b363af8c9bf1b8e04139" +checksum = "a0474425d51df81997e2f90a21591180b38eccf27292d755f3e30750225c175b" [[package]] name = "euclid" -version = "0.22.9" +version = "0.22.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f253bc5c813ca05792837a0ff4b3a580336b224512d48f7eda1d7dd9210787" +checksum = "e0f0eb73b934648cd7a4a61f1b15391cd95dab0b4da6e2e66c2a072c144b4a20" dependencies = [ "num-traits", ] @@ -2855,20 +2023,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "5.1.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ad6fd685ce13acd6d9541a30f6db6567a7a24c9ffd4ba2955d29e3f22c8b27" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", @@ -2877,21 +2034,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite", -] - -[[package]] -name = "event-listener-strategy" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.1.0", + "event-listener 5.3.1", "pin-project-lite", ] @@ -2913,18 +2060,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fdeflate" @@ -2962,11 +2100,17 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -3014,7 +2158,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -3070,26 +2214,11 @@ checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-lite" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.0.1", + "fastrand", "futures-core", "futures-io", "parking", @@ -3132,9 +2261,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -3155,9 +2284,9 @@ dependencies = [ [[package]] name = "gilrs" -version = "0.10.4" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b2e57a9cb946b5d04ae8638c5f554abb5a9f82c4c950fd5b1fee6d119592fb" +checksum = "b54e5e39844ab5cddaf3bbbdfdc2923a6cb34e36818b95618da4e3f26302c24c" dependencies = [ "fnv", "gilrs-core", @@ -3168,9 +2297,9 @@ dependencies = [ [[package]] name = "gilrs-core" -version = "0.5.10" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0af1827b7dd2f36d740ae804c1b3ea0d64c12533fb61ff91883005143a0e8c5a" +checksum = "b922f294d9f062af517ea0bd0a036ddcf11c2842211c2f9c71a3ceee859e10b6" dependencies = [ "core-foundation", "inotify 0.10.2", @@ -3179,19 +2308,19 @@ dependencies = [ "libc", "libudev-sys", "log", - "nix 0.27.1", + "nix", "uuid", "vec_map", "wasm-bindgen", "web-sys", - "windows 0.52.0", + "windows 0.57.0", ] [[package]] name = "gimli" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "gl_generator" @@ -3206,21 +2335,12 @@ dependencies = [ [[package]] name = "glam" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5418c17512bdf42730f9032c74e1ae39afc408745ebb2acf72fbc4691c17945" -dependencies = [ - "bytemuck", - "serde", -] - -[[package]] -name = "glam" -version = "0.25.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "151665d9be52f9bb40fc7966565d39666f2d1e69233571b71b87791c7e0528b3" +checksum 
= "9e05e7e6723e3455f4818c7b26e855439f7546cf617ef669d1adedb8669e5cb9" dependencies = [ "bytemuck", + "rand", "serde", ] @@ -3230,18 +2350,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "glow" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0fe580e4b60a8ab24a868bc08e2f03cbcb20d3d676601fa909386713333728" -dependencies = [ - "js-sys", - "slotmap", - "wasm-bindgen", - "web-sys", -] - [[package]] name = "glow" version = "0.13.1" @@ -3256,9 +2364,9 @@ dependencies = [ [[package]] name = "gltf" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b78f069cf941075835822953c345b9e1edd67ae347b81ace3aea9de38c2ef33" +checksum = "e3ce1918195723ce6ac74e80542c5a96a40c2b26162c1957a5cd70799b8cacf7" dependencies = [ "byteorder", "gltf-json", @@ -3268,21 +2376,21 @@ dependencies = [ [[package]] name = "gltf-derive" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "438ffe1a5540d75403feaf23636b164e816e93f6f03131674722b3886ce32a57" +checksum = "14070e711538afba5d6c807edb74bcb84e5dbb9211a3bf5dea0dfab5b24f4c51" dependencies = [ "inflections", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "gltf-json" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655951ba557f2bc69ea4b0799446bae281fa78efae6319968bdd2c3e9a06d8e1" +checksum = "e6176f9d60a7eab0a877e8e96548605dedbde9190a7ae1e80bbcc1c9af03ab14" dependencies = [ "gltf-derive", "serde", @@ -3301,9 +2409,9 @@ dependencies = [ [[package]] name = "glyph_brush_layout" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc32c2334f00ca5ac3695c5009ae35da21da8c62d255b5b96d56e2597a637a38" +checksum = "7b1e288bfd2f6c0313f78bf5aa538356ad481a3bb97e9b7f93220ab0066c5992" dependencies = [ "ab_glyph", "approx", @@ -3316,7 +2424,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "gpu-alloc-types", ] @@ -3326,20 +2434,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4" dependencies = [ - "bitflags 2.4.2", -] - -[[package]] -name = "gpu-allocator" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95f9e2e11c2c6fadfce42b5af60005db06576f231f5c92550fdded43c423e8" -dependencies = [ - "backtrace", - "log", - "thiserror", - "winapi", - "windows 0.44.0", + "bitflags 2.6.0", ] [[package]] @@ -3357,29 +2452,29 @@ dependencies = [ [[package]] name = "gpu-descriptor" -version = "0.2.4" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc11df1ace8e7e564511f53af41f3e42ddc95b56fd07b3f4445d2a6048bc682c" +checksum = "9c08c1f623a8d0b722b8b99f821eb0ba672a1618f0d3b16ddbee1cedd2dd8557" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "gpu-descriptor-types", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] name = "gpu-descriptor-types" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6bf0b36e6f090b7e1d8a4b49c0cb81c1f8376f72198c65dd3ad9ff3556b8b78c" +checksum = "fdf242682df893b86f33a73828fb09ca4b2d3bb6cc95249707fc684d27484b91" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", ] [[package]] name = "grid" -version = "0.10.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eec1c01eb1de97451ee0d60de7d81cf1e72aabefb021616027f3d1c3ec1c723c" +checksum = "be136d9dacc2a13cc70bb6c8f902b414fb2641f8db1314637c6b7933411a8f82" [[package]] name = "guillotiere" @@ -3393,9 +2488,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -3403,7 +2498,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.3", + "indexmap", "slab", "tokio", "tokio-util", @@ -3412,9 +2507,9 @@ dependencies = [ [[package]] name = "half" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc52e53916c08643f1b56ec082790d1e86a32e58dc5268f897f313fbae7b4872" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -3422,46 +2517,25 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", "serde", ] -[[package]] -name = "hassle-rs" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1397650ee315e8891a0df210707f0fc61771b0cc518c3023896064c5407cb3b0" -dependencies = [ - "bitflags 1.3.2", - "com-rs", - "libc", - "libloading 0.7.4", - "thiserror", - "widestring", - "winapi", -] - [[package]] name = "hassle-rs" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af2a7e73e1f34c48da31fb668a907f250794837e08faa144fd24f0b8b741e890" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "com", "libc", - "libloading 0.8.1", + "libloading 0.8.4", "thiserror", "widestring", "winapi", @@ -3469,34 +2543,30 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] -name = "hexasphere" -version = "9.1.0" +name = "hermit-abi" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb3df16a7bcb1b5bc092abd55e14f77ca70aea14445026e264586fc62889a10" -dependencies = [ - "constgebra", - 
"glam 0.24.2", -] +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hexasphere" -version = "10.0.0" +version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f33ddb7f7143d9e703c072e88b98cd8b9719f174137a671429351bd2ee43c02a" +checksum = "edd6b038160f086b0a7496edae34169ae22f328793cbe2b627a5a3d8373748ec" dependencies = [ "constgebra", - "glam 0.25.0", + "glam", ] [[package]] @@ -3516,9 +2586,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -3538,9 +2608,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -3550,9 +2620,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", @@ -3596,7 +2666,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -3608,17 +2678,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icrate" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d3aaff8a54577104bafdf686ff18565c3b6903ca5782a2026ef06e2c7aa319" -dependencies = [ - "block2 0.3.0", - "dispatch", - "objc2 0.4.1", -] - [[package]] name = "idna" version = "0.5.0" @@ -3631,40 +2690,60 @@ dependencies = [ [[package]] name = "image" -version = "0.24.9" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" +checksum = "fd54d660e773627692c524beaad361aca785a4f9f5730ce91f42aabe5bce3d11" dependencies = [ "bytemuck", "byteorder", "color_quant", "exr", "gif", - "jpeg-decoder", + "image-webp", "num-traits", "png", "qoi", + "ravif", + "rayon", + "rgb", "tiff", + "zune-core", + "zune-jpeg", +] + +[[package]] +name = "image-webp" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d730b085583c4d789dfd07fdcf185be59501666a90c97c40162b37e4fdad272d" +dependencies = [ + "byteorder-lite", + "thiserror", ] [[package]] -name = "indexmap" -version = "1.9.3" +name = "imgref" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44feda355f4159a7c757171a77de25daf6411e217b4cabd03bd6650690468126" + +[[package]] +name = "immutable-chunkmap" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "4419f022e55cc63d5bbd6b44b71e1d226b9c9480a47824c706e9d54e5c40c5eb" dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "arrayvec", ] [[package]] name = "indexmap" -version = "2.2.3" 
+version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -3705,22 +2784,21 @@ dependencies = [ ] [[package]] -name = "instant" -version = "0.1.12" +name = "interpolate_name" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", + "proc-macro2", + "quote", + "syn 2.0.68", ] [[package]] name = "io-kit-sys" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4769cb30e5dcf1710fc6730d3e94f78c47723a014a567de385e113c737394640" +checksum = "617ee6cf8e3f66f3b4ea67a4058564628cde41901316e19f559e14c7c72c5e7b" dependencies = [ "core-foundation-sys", "mach2", @@ -3743,37 +2821,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" - -[[package]] -name = "jni" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6df18c2e3db7e453d3c6ac5b3e9d5182664d28788126d39b91f2d1e22b017ec" -dependencies = [ - "cesu8", - "combine", - "jni-sys", - "log", - "thiserror", - "walkdir", -] - -[[package]] -name = "jni" -version = "0.20.0" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "039022cdf4d7b1cf548d31f60ae783138e5fd42013f6271049d7df7afadef96c" -dependencies = [ - "cesu8", - "combine", - "jni-sys", - "log", - "thiserror", - "walkdir", -] +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jni" @@ -3798,32 +2848,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] -name = "jpeg-decoder" -version = "0.3.1" +name = "jobserver" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ - "rayon", + "libc", ] [[package]] -name = "js-sys" -version = "0.3.68" +name = "jpeg-decoder" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" -dependencies = [ - "wasm-bindgen", -] +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" [[package]] -name = "khronos-egl" -version = "4.1.0" +name = "js-sys" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2352bd1d0bceb871cb9d40f24360c8133c11d7486b68b5381c1dd1a32015e3" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ - "libc", - "libloading 0.7.4", - "pkg-config", + "wasm-bindgen", ] [[package]] @@ -3833,7 +2878,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6aae1df220ece3c0ada96b8153459b67eebe9ae9212258bb0134ae60416fdf76" dependencies = [ "libc", 
- "libloading 0.8.1", + "libloading 0.8.4", "pkg-config", ] @@ -3880,9 +2925,9 @@ checksum = "10257499f089cd156ad82d0a9cd57d9501fa2c989068992a97eb3c27836f206b" [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" @@ -3909,9 +2954,20 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.153" +version = "0.2.155" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" + +[[package]] +name = "libfuzzer-sys" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] [[package]] name = "libloading" @@ -3925,34 +2981,33 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.6", ] [[package]] name = "libredox" -version = "0.0.1" +version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +checksum = "3af92c55d7d839293953fcd0fda5ecfe93297cfde6ffbdec13b41d99c0ba6607" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "libc", "redox_syscall 0.4.1", ] [[package]] name = "libredox" -version = "0.0.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3af92c55d7d839293953fcd0fda5ecfe93297cfde6ffbdec13b41d99c0ba6607" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "libc", - "redox_syscall 0.4.1", ] [[package]] @@ -3967,15 +3022,21 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "litrs" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -3983,9 +3044,18 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "loop9" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" +dependencies = [ + "imgref", +] [[package]] name = "mach2" @@ -4014,11 +3084,21 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" @@ -4031,26 +3111,11 @@ dependencies = [ [[package]] name = "metal" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "623b5e6cefd76e58f774bd3cc0c6f5c7615c58c03a97815245a25c3c9bdee318" -dependencies = [ - "bitflags 2.4.2", - "block", - "core-graphics-types", - "foreign-types 0.5.0", - "log", - "objc", - "paste", -] - -[[package]] -name = "metal" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43f73953f8cbe511f021b58f18c3ce1c3d1ae13fe953293e13345bf83217f25" +checksum = "5637e166ea14be6063a3f8ba5ccb9a4159df7d8f6d61c02fc3d480b1f90dcfcb" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "block", "core-graphics-types", "foreign-types 0.5.0", @@ -4073,9 +3138,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", "simd-adler32", @@ -4083,9 +3148,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", @@ -4095,41 +3160,21 @@ dependencies = [ [[package]] name = "naga" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ceaaa4eedaece7e4ec08c55c640ba03dbb73fb812a6570a59bcf1930d0f70e" -dependencies = [ - "bit-set", - "bitflags 2.4.2", - "codespan-reporting", - "hexf-parse", - "indexmap 1.9.3", - "log", - "num-traits", - "pp-rs", - "rustc-hash", - "spirv 0.2.0+1.5.4", - "termcolor", - "thiserror", - "unicode-xid", -] - -[[package]] -name = "naga" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8878eb410fc90853da3908aebfe61d73d26d4437ef850b70050461f939509899" +checksum = "e536ae46fcab0876853bd4a632ede5df4b1c2527a58f6c5a4150fe86be858231" dependencies = [ + "arrayvec", "bit-set", - "bitflags 2.4.2", + "bitflags 2.6.0", "codespan-reporting", "hexf-parse", - "indexmap 2.2.3", + "indexmap", "log", "num-traits", "pp-rs", "rustc-hash", - "spirv 0.3.0+sdk-1.3.268.0", + "spirv", "termcolor", "thiserror", "unicode-xid", @@ -4137,38 +3182,18 @@ dependencies = [ [[package]] name = "naga_oil" -version = "0.10.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac54c77b3529887f9668d3dd81e955e58f252b31a333f836e3548c06460b958" -dependencies = [ - "bit-set", - "codespan-reporting", - "data-encoding", - "indexmap 1.9.3", - "naga 0.13.0", - "once_cell", - "regex", - "regex-syntax 0.7.5", - "rustc-hash", - "thiserror", - "tracing", - "unicode-ident", -] - -[[package]] -name = "naga_oil" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ea62ae0f2787456afca7209ca180522b41f00cbe159ee369eba1e07d365cd1" +checksum = "275d9720a7338eedac966141089232514c84d76a246a58ef501af88c5edf402f" dependencies = [ "bit-set", "codespan-reporting", "data-encoding", - "indexmap 2.2.3", - "naga 0.19.0", + "indexmap", + "naga", "once_cell", "regex", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", "rustc-hash", "thiserror", "tracing", @@ -4177,11 +3202,10 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -4195,30 +3219,30 @@ dependencies = [ [[package]] name = "ndk" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0" +checksum = "2076a31b7010b17a38c01907c45b945e8f11495ee4dd588309718901b1f7a5b7" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "jni-sys", - "ndk-sys 0.4.1+23.1.7779620", - "num_enum 0.5.11", - "raw-window-handle 0.5.2", + "log", + "ndk-sys 0.5.0+25.2.9519653", + "num_enum", "thiserror", ] [[package]] name = "ndk" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2076a31b7010b17a38c01907c45b945e8f11495ee4dd588309718901b1f7a5b7" +checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "jni-sys", "log", - "ndk-sys 0.5.0+25.2.9519653", - "num_enum 0.7.2", - "raw-window-handle 0.6.0", + "ndk-sys 0.6.0+11769913", + "num_enum", + "raw-window-handle", "thiserror", ] @@ -4230,41 +3254,37 @@ checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" [[package]] name = "ndk-sys" -version = "0.4.1+23.1.7779620" +version = "0.5.0+25.2.9519653" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf2aae958bd232cac5069850591667ad422d263686d75b52a065f9badeee5a3" +checksum = "8c196769dd60fd4f363e11d948139556a344e79d451aeb2fa2fd040738ef7691" dependencies = [ "jni-sys", ] [[package]] name = "ndk-sys" -version = "0.5.0+25.2.9519653" +version = "0.6.0+11769913" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c196769dd60fd4f363e11d948139556a344e79d451aeb2fa2fd040738ef7691" +checksum = "ee6cda3051665f1fb8d9e08fc35c96d5a244fb1be711a03b71118828afc9a873" dependencies = [ "jni-sys", ] [[package]] -name = "nix" -version = "0.24.3" +name = "new_debug_unreachable" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" -dependencies = [ - "bitflags 1.3.2", - "cfg-if", - "libc", -] +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" [[package]] name = 
"nix" -version = "0.27.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "cfg-if", + "cfg_aliases 0.2.1", "libc", ] @@ -4291,13 +3311,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "610a5acd306ec67f907abe5567859a3c693fb9886eb1f012ab8f2a47bef3db51" [[package]] -name = "normpath" -version = "1.2.0" +name = "noop_proc_macro" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5831952a9476f2fed74b77d74182fa5ddc4d21c72ec45a333b250e3ed0272804" -dependencies = [ - "windows-sys 0.52.0", -] +checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" [[package]] name = "notify" @@ -4305,7 +3322,7 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "crossbeam-channel", "filetime", "fsevent-sys", @@ -4351,22 +3368,52 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + [[package]] name = "num-derive" -version = "0.3.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.68", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", ] [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -4377,149 +3424,252 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] [[package]] name = "num_enum" -version = "0.5.11" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ - "num_enum_derive 0.5.11", + "num_enum_derive", ] [[package]] -name = "num_enum" -version = "0.6.1" +name = "num_enum_derive" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "objc" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +dependencies = [ + "malloc_buf", +] + +[[package]] +name = "objc-foundation" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1add1b659e36c9607c7aab864a76c7a4c2760cd0cd2e120f3fb8b952c7e22bf9" +dependencies = [ + "block", + "objc", + "objc_id", +] + +[[package]] +name = "objc-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb91bdd390c7ce1a8607f35f3ca7151b65afc0ff5ff3b34fa350f7d7c7e4310" + +[[package]] +name = "objc2" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46a785d4eeff09c14c487497c162e92766fbb3e4059a71840cecc03d9a50b804" +dependencies = [ + "objc-sys", + "objc2-encode", +] + +[[package]] +name = "objc2-app-kit" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" +dependencies = [ + "bitflags 2.6.0", + "block2", + "libc", + "objc2", + "objc2-core-data", + "objc2-core-image", + "objc2-foundation", + "objc2-quartz-core", +] + +[[package]] +name = "objc2-cloud-kit" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a015b430d3c108a207fd776d2e2196aaf8b1cf8cf93253e3a097ff3085076a1" +checksum = "74dd3b56391c7a0596a295029734d3c1c5e7e510a4cb30245f8221ccea96b009" dependencies = [ - "num_enum_derive 0.6.1", + "bitflags 2.6.0", + "block2", + "objc2", + "objc2-core-location", + "objc2-foundation", ] [[package]] -name = "num_enum" -version = "0.7.2" +name = "objc2-contacts" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "a5ff520e9c33812fd374d8deecef01d4a840e7b41862d849513de77e44aa4889" dependencies = [ - "num_enum_derive 0.7.2", + "block2", + "objc2", + "objc2-foundation", ] [[package]] -name = "num_enum_derive" -version = "0.5.11" +name = "objc2-core-data" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799" +checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 1.0.109", + "bitflags 2.6.0", + "block2", + "objc2", + "objc2-foundation", ] [[package]] -name = "num_enum_derive" -version = "0.6.1" +name = "objc2-core-image" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" +checksum = "55260963a527c99f1819c4f8e3b47fe04f9650694ef348ffd2227e8196d34c80" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 2.0.50", + "block2", + "objc2", + "objc2-foundation", + "objc2-metal", ] [[package]] -name = "num_enum_derive" -version = "0.7.2" +name = "objc2-core-location" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = 
"000cfee34e683244f284252ee206a27953279d370e309649dc3ee317b37e5781" dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2", - "quote", - "syn 2.0.50", + "block2", + "objc2", + "objc2-contacts", + "objc2-foundation", ] [[package]] -name = "objc" -version = "0.2.7" +name = "objc2-encode" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" -dependencies = [ - "malloc_buf", - "objc_exception", -] +checksum = "7891e71393cd1f227313c9379a26a584ff3d7e6e7159e988851f0934c993f0f8" [[package]] -name = "objc-foundation" -version = "0.1.1" +name = "objc2-foundation" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1add1b659e36c9607c7aab864a76c7a4c2760cd0cd2e120f3fb8b952c7e22bf9" +checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "block", - "objc", - "objc_id", + "bitflags 2.6.0", + "block2", + "dispatch", + "libc", + "objc2", ] [[package]] -name = "objc-sys" -version = "0.2.0-beta.2" +name = "objc2-link-presentation" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b9834c1e95694a05a828b59f55fa2afec6288359cda67146126b3f90a55d7" +checksum = "a1a1ae721c5e35be65f01a03b6d2ac13a54cb4fa70d8a5da293d7b0020261398" +dependencies = [ + "block2", + "objc2", + "objc2-app-kit", + "objc2-foundation", +] [[package]] -name = "objc-sys" -version = "0.3.2" +name = "objc2-metal" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c71324e4180d0899963fc83d9d241ac39e699609fc1025a850aadac8257459" +checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" +dependencies = [ + "bitflags 2.6.0", + "block2", + "objc2", + "objc2-foundation", +] [[package]] -name = "objc2" -version = "0.3.0-beta.3.patch-leaks.3" +name = "objc2-quartz-core" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e01640f9f2cb1220bbe80325e179e532cb3379ebcd1bf2279d703c19fe3a468" +checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" dependencies = [ - "block2 0.2.0-alpha.6", - "objc-sys 0.2.0-beta.2", - "objc2-encode 2.0.0-pre.2", + "bitflags 2.6.0", + "block2", + "objc2", + "objc2-foundation", + "objc2-metal", ] [[package]] -name = "objc2" -version = "0.4.1" +name = "objc2-symbols" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559c5a40fdd30eb5e344fbceacf7595a81e242529fb4e21cf5f43fb4f11ff98d" +checksum = "0a684efe3dec1b305badae1a28f6555f6ddd3bb2c2267896782858d5a78404dc" dependencies = [ - "objc-sys 0.3.2", - "objc2-encode 3.0.0", + "objc2", + "objc2-foundation", ] [[package]] -name = "objc2-encode" -version = "2.0.0-pre.2" +name = "objc2-ui-kit" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abfcac41015b00a120608fdaa6938c44cb983fee294351cc4bac7638b4e50512" +checksum = "b8bb46798b20cd6b91cbd113524c490f1686f4c4e8f49502431415f3512e2b6f" dependencies = [ - "objc-sys 0.2.0-beta.2", + "bitflags 2.6.0", + "block2", + "objc2", + "objc2-cloud-kit", + "objc2-core-data", + "objc2-core-image", + "objc2-core-location", + "objc2-foundation", + "objc2-link-presentation", + "objc2-quartz-core", + "objc2-symbols", + "objc2-uniform-type-identifiers", + "objc2-user-notifications", ] [[package]] -name = "objc2-encode" -version = "3.0.0" +name = "objc2-uniform-type-identifiers" +version = 
"0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d079845b37af429bfe5dfa76e6d087d788031045b25cfc6fd898486fd9847666" +checksum = "44fa5f9748dbfe1ca6c0b79ad20725a11eca7c2218bceb4b005cb1be26273bfe" +dependencies = [ + "block2", + "objc2", + "objc2-foundation", +] [[package]] -name = "objc_exception" -version = "0.1.2" +name = "objc2-user-notifications" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad970fb455818ad6cba4c122ad012fae53ae8b4795f86378bce65e4f6bab2ca4" +checksum = "76cfcbf642358e8689af64cee815d139339f3ed8ad05103ed5eaf73db8d84cb3" dependencies = [ - "cc", + "bitflags 2.6.0", + "block2", + "objc2", + "objc2-core-location", + "objc2-foundation", ] [[package]] @@ -4533,21 +3683,21 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" dependencies = [ "memchr", ] [[package]] name = "oboe" -version = "0.5.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8868cc237ee02e2d9618539a23a8d228b9bb3fc2e7a5b11eed3831de77c395d0" +checksum = "e8b61bebd49e5d43f5f8cc7ee2891c16e0f41ec7954d36bcb6c14c5e0de867fb" dependencies = [ - "jni 0.20.0", - "ndk 0.7.0", + "jni", + "ndk 0.8.0", "ndk-context", "num-derive", "num-traits", @@ -4556,9 +3706,9 @@ dependencies = [ [[package]] name = "oboe-sys" -version = "0.5.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f44155e7fb718d3cfddcf70690b2b51ac4412f347cd9e4fbe511abe9cd7b5f2" +checksum = "6c8bb09a4a2b1d668170cfe0a7d5bc103f8999fb316c98099b6a9939c9f2e79d" dependencies = [ "cc", ] @@ -4578,24 +3728,13 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "opener" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c62dcb6174f9cb326eac248f07e955d5d559c272730b6c03e396b443b562788" -dependencies = [ - "bstr", - "normpath", - "winapi", -] - [[package]] name = "openssl" version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "cfg-if", "foreign-types 0.3.2", "libc", @@ -4612,7 +3751,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -4623,9 +3762,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.101" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -4656,9 +3795,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "owned_ttf_parser" -version = "0.20.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4586edfe4c648c71797a74c84bacb32b52b212eff5dfe2bb9f2c599844023e7" +checksum 
= "490d3a563d3122bf7c911a59b0add9389e5ec0f5f0c3ac6b91ff235a0e6a7f90" dependencies = [ "ttf-parser", ] @@ -4671,9 +3810,9 @@ checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -4681,22 +3820,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.2", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "percent-encoding" @@ -4706,19 +3845,41 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ - "fixedbitset", - "indexmap 2.2.3", + "fixedbitset 0.4.2", + "indexmap", + "serde", + "serde_derive", +] + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -4728,12 +3889,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" dependencies = [ "atomic-waker", - "fastrand 2.0.1", + "fastrand", "futures-io", ] @@ -4758,12 +3919,13 @@ dependencies = [ [[package]] name = "polling" -version = "3.5.0" +version = "3.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f040dee2588b4963afb4e420540439d126f73fdacf4a9c486a96d840bac3c9" +checksum = "a3ed00ed3fbf728b5816498ecd316d1716eecaced9c0c8d2c5a6740ca214985b" 
dependencies = [ "cfg-if", "concurrent-queue", + "hermit-abi 0.4.0", "pin-project-lite", "rustix", "tracing", @@ -4780,26 +3942,16 @@ dependencies = [ ] [[package]] -name = "presser" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8cf8e6a8aa66ce33f63993ffc4ea4271eb5b0530a9002db8455ea6050c77bfa" - -[[package]] -name = "pretty-type-name" -version = "1.0.1" +name = "ppv-lite86" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f73cdaf19b52e6143685c3606206e114a4dfa969d6b14ec3894c88eb38bd4b" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] -name = "proc-macro-crate" -version = "1.3.1" +name = "presser" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] +checksum = "e8cf8e6a8aa66ce33f63993ffc4ea4271eb5b0530a9002db8455ea6050c77bfa" [[package]] name = "proc-macro-crate" @@ -4811,44 +3963,33 @@ dependencies = [ ] [[package]] -name = "proc-macro-error" -version = "1.0.4" +name = "proc-macro2" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", + "unicode-ident", ] [[package]] -name = "proc-macro-error-attr" -version = "1.0.4" +name = "profiling" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +checksum = "43d84d1d7a6ac92673717f9f6d1518374ef257669c24ebc5ac25d5033828be58" dependencies = [ - "proc-macro2", - "quote", - "version_check", + "profiling-procmacros", ] [[package]] -name = "proc-macro2" -version = "1.0.78" +name = "profiling-procmacros" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "8021cf59c8ec9c432cfc2526ac6b8aa508ecaf29cd415f271b8406c1b851c3fd" dependencies = [ - "unicode-ident", + "quote", + "syn 2.0.68", ] -[[package]] -name = "profiling" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d84d1d7a6ac92673717f9f6d1518374ef257669c24ebc5ac25d5033828be58" - [[package]] name = "qoi" version = "0.4.1" @@ -4858,20 +3999,26 @@ dependencies = [ "bytemuck", ] +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quick-xml" -version = "0.31.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" +checksum = "6f24d770aeca0eacb81ac29dfbc55ebcc09312fdd1f8bbecdc7e4a84e000e3b4" dependencies = [ "memchr", ] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -4882,6 
+4029,36 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17fd96390ed3feda12e1dfe2645ed587e0bea749e319333f104a33ff62f77a0b" +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + [[package]] name = "range-alloc" version = "0.1.3" @@ -4889,22 +4066,66 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8a99fddc9f0ba0a85884b8d14e3592853e787d581ca1816c91349b10e4eeab" [[package]] -name = "raw-window-handle" -version = "0.5.2" +name = "rav1e" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" +dependencies = [ + "arbitrary", + "arg_enum_proc_macro", + "arrayvec", + "av1-grain", + "bitstream-io", + "built", + "cfg-if", + "interpolate_name", + "itertools", + "libc", + "libfuzzer-sys", + "log", + "maybe-rayon", + "new_debug_unreachable", + "noop_proc_macro", + "num-derive", + "num-traits", + "once_cell", + "paste", + "profiling", + "rand", + "rand_chacha", + "simd_helpers", + "system-deps", + "thiserror", + "v_frame", + "wasm-bindgen", +] + +[[package]] +name = "ravif" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" +checksum = "67376f469e7e7840d0040bbf4b9b3334005bb167f814621326e4c7ab8cd6e944" +dependencies = [ + "avif-serialize", + "imgref", + "loop9", + "quick-error", + "rav1e", + "rayon", + "rgb", +] [[package]] name = "raw-window-handle" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42a9830a0e1b9fb145ebb365b8bc4ccd75f290f98c0247deafbbe2c75cefb544" +checksum = "20675572f6f24e9e76ef639bc5552774ed45f1c30e2951e1e99c59888861c539" [[package]] name = "rayon" -version = "1.8.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -4928,43 +4149,43 @@ checksum = "a0d463f2884048e7153449a55166f91028d5b0ea53c79377099ce4e8cf0cf9bb" [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = 
"c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", - "libredox 0.0.1", + "libredox 0.1.3", "thiserror", ] [[package]] name = "regex" -version = "1.10.3" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.7", + "regex-syntax 0.8.4", ] [[package]] @@ -4978,13 +4199,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", ] [[package]] @@ -4995,29 +4216,23 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - -[[package]] -name = "regex-syntax" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "renderdoc-sys" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216080ab382b992234dda86873c18d4c48358f5cfcb70fd693d7f6f2131b628b" +checksum = "19b30a45b0cd0bcca8037f3d0dc3421eaf95327a17cad11964fb8179b4fc4832" [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", @@ -5051,14 +4266,24 @@ dependencies = [ "winreg", ] +[[package]] +name = "rgb" +version = "0.8.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7439be6844e40133eda024efd85bf07f59d0dd2f59b10c00dd6cfb92cc5c741" +dependencies = [ + "bytemuck", +] + [[package]] name = "rodio" -version = "0.17.3" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b1bb7b48ee48471f55da122c0044fcc7600cfcc85db88240b89cb832935e611" +checksum = "d1fceb9d127d515af1586d8d0cc601e1245bdb0af38e75c865a156290184f5b3" dependencies = [ "cpal", "lewton", + "thiserror", ] [[package]] @@ -5067,17 +4292,17 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" dependencies = [ - "base64", - "bitflags 2.4.2", + "base64 
0.21.7", + "bitflags 2.6.0", "serde", "serde_derive", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -5087,11 +4312,11 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -5104,25 +4329,24 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64", + "base64 0.21.7", ] [[package]] name = "ruzstd" -version = "0.5.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" +checksum = "5022b253619b1ba797f243056276bed8ed1a73b0f5a7ce7225d524067644bf8f" dependencies = [ "byteorder", - "derive_more", "twox-hash", ] [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -5156,9 +4380,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sctk-adwaita" -version = "0.8.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b2eaf3a5b264a521b988b2e73042e742df700c4f962cde845d1541adb46550" +checksum = "7555fcb4f753d095d734fdefebb0ad8c98478a21db500492d87c55913d3b0086" dependencies = [ "ab_glyph", "log", @@ -5169,11 +4393,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -5182,39 +4406,45 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", ] +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "serde" -version = "1.0.197" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" 
dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.203" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" dependencies = [ "itoa", "ryu", @@ -5223,9 +4453,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -5244,12 +4474,11 @@ dependencies = [ [[package]] name = "shadplay" -version = "0.0.1" +version = "0.0.2" dependencies = [ "anyhow", - "bevy 0.13.0", - "bevy_editor_pls", - "bevy_egui 0.25.0", + "bevy", + "bevy_egui", "bevy_panorbit_camera", "chrono", "copypasta", @@ -5279,9 +4508,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -5292,6 +4521,15 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +[[package]] +name = "simd_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" +dependencies = [ + "quote", +] + [[package]] name = "slab" version = "0.4.9" @@ -5312,12 +4550,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" -dependencies = [ - "serde", -] +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smithay-client-toolkit" @@ -5325,7 +4560,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "922fd3eeab3bd820d76537ce8f582b1cf951eceb5475c28500c7457d9d17f53a" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "calloop", "calloop-wayland-source", "cursor-icon", @@ -5340,7 +4575,7 @@ dependencies = [ "wayland-cursor", "wayland-protocols", "wayland-protocols-wlr", - "wayland-scanner 0.31.1", + "wayland-scanner", "xkeysym", ] @@ -5357,18 +4592,18 @@ dependencies = [ [[package]] name = "smol_str" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6845563ada680337a52d43bb0b29f396f2d911616f6573012645b9e3d048a49" +checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" dependencies = [ "serde", ] [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5383,23 +4618,13 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spirv" -version = "0.2.0+1.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246bfa38fe3db3f1dfc8ca5a2cdeb7348c78be2112740cc0ec8ef18b6d94f830" -dependencies = [ - "bitflags 1.3.2", - "num-traits", -] - [[package]] name = "spirv" version = "0.3.0+sdk-1.3.268.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eda41003dc44290527a59b13432d4a0379379fa074b70174882adfbdfd917844" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", ] [[package]] @@ -5416,9 +4641,9 @@ checksum = "6637bab7722d379c8b41ba849228d680cc12d0a45ba1fa2b48f2a30577a06731" [[package]] name = "svg_fmt" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb1df15f412ee2e9dfc1c504260fa695c1c3f10fe9f4a6ee2d2184d7d6450e2" +checksum = "20e16a0f46cf5fd675563ef54f26e83e20f2366bcf027bcb3cc3ed2b98aaf2ca" [[package]] name = "syn" @@ -5433,9 +4658,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" dependencies = [ "proc-macro2", "quote", @@ -5450,23 +4675,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" -version = "0.29.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" -dependencies = [ - "cfg-if", - "core-foundation-sys", - "libc", - "ntapi", - "once_cell", - "winapi", -] - -[[package]] -name = "sysinfo" -version = "0.30.5" +version = "0.30.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb4f3438c8f6389c864e61221cbc97e9bca98b4daf39a5beb7bea660f528bb2" +checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae" dependencies = [ "cfg-if", "core-foundation-sys", @@ -5497,26 +4708,46 @@ dependencies = [ "libc", ] +[[package]] +name = "system-deps" +version = "6.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + [[package]] name = "taffy" -version = "0.3.18" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2287b6d7f721ada4cddf61ade5e760b2c6207df041cac9bfaa192897362fd3" +checksum = "e8b61630cba2afd2c851821add2e1bb1b7851a2436e839ab73b56558b009035e" dependencies = [ "arrayvec", "grid", "num-traits", + "serde", "slotmap", ] +[[package]] +name = "target-lexicon" +version = "0.12.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" + [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = 
"85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.1", + "fastrand", "rustix", "windows-sys 0.52.0", ] @@ -5532,22 +4763,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -5598,9 +4829,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "ce6b6a2fb3a985e99cebfaefa9faa3024743da73304ca1c683a36429613d3d22" dependencies = [ "tinyvec_macros", ] @@ -5613,9 +4844,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -5632,13 +4863,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -5653,83 +4884,60 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] name = "toml" -version = "0.8.10" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.6", + "toml_edit 0.22.14", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] -[[package]] -name = "toml_edit" -version = "0.19.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" -dependencies = [ - "indexmap 2.2.3", - "toml_datetime", - 
"winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.2.3", - "toml_datetime", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.3", + "indexmap", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.6" +version = "0.22.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" +checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" dependencies = [ - "indexmap 2.2.3", + "indexmap", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.2", + "winnow 0.6.13", ] [[package]] @@ -5757,7 +4965,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", ] [[package]] @@ -5770,17 +4978,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-log" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -5807,7 +5004,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log 0.2.0", + "tracing-log", ] [[package]] @@ -5829,9 +5026,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "ttf-parser" -version = "0.20.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f77d76d837a7830fe1d4f12b7b4ba4192c1888001c7164257e4bc6d21d96b4" +checksum = "8686b91785aff82828ed725225925b33b4fde44c4bb15876e5f7c832724c420a" [[package]] name = "twox-hash" @@ -5843,6 +5040,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "typeid" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf" + [[package]] name = "unicode-bidi" version = "0.3.15" @@ -5872,9 +5075,9 @@ checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" [[package]] name = "unicode-xid" @@ -5884,9 +5087,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -5895,14 +5098,25 @@ dependencies = [ [[package]] name = "uuid" -version = "1.7.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ "getrandom", "serde", ] +[[package]] +name = "v_frame" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +dependencies = [ + "aligned-vec", + "num-traits", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.0" @@ -5922,22 +5136,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] -name = "version_check" -version = "0.9.4" +name = "version-compare" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" [[package]] -name = "waker-fn" -version = "1.1.1" +name = "version_check" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -5960,9 +5174,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -5970,24 +5184,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -5997,9 +5211,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6007,28 +5221,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.68", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wayland-backend" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d50fa61ce90d76474c87f5fc002828d81b32677340112b4ef08079a9d459a40" +checksum = "269c04f203640d0da2092d1b8d89a2d081714ae3ac2f1b53e99f205740517198" dependencies = [ "cc", "downcast-rs", @@ -6040,14 +5254,14 @@ dependencies = [ [[package]] name = "wayland-client" -version = "0.31.2" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fb96ee935c2cea6668ccb470fb7771f6215d1691746c2d896b447a00ad3f1f" +checksum = "08bd0f46c069d3382a36c8666c1b9ccef32b8b04f41667ca1fef06a1adcc2982" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "rustix", "wayland-backend", - "wayland-scanner 0.31.1", + "wayland-scanner", ] [[package]] @@ -6056,16 +5270,16 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "625c5029dbd43d25e6aa9615e88b829a5cad13b2819c4ae129fdbb7c31ab4c7e" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "cursor-icon", "wayland-backend", ] [[package]] name = "wayland-cursor" -version = "0.31.1" +version = "0.31.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ce5fa868dd13d11a0d04c5e2e65726d0897be8de247c0c5a65886e283231ba" +checksum = "09414bcf0fd8d9577d73e9ac4659ebc45bcc9cff1980a350543ad8e50ee263b2" dependencies = [ "rustix", "wayland-client", @@ -6078,10 +5292,10 @@ version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f81f365b8b4a97f422ac0e8737c438024b5951734506b0e1d775c73030561f4" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "wayland-backend", "wayland-client", - "wayland-scanner 0.31.1", + "wayland-scanner", ] [[package]] @@ -6090,11 +5304,11 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23803551115ff9ea9bce586860c5c5a971e360825a0309264102a9495a5ff479" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "wayland-backend", "wayland-client", "wayland-protocols", - "wayland-scanner 0.31.1", + "wayland-scanner", ] [[package]] @@ -6103,29 +5317,18 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad1f61b76b6c2d8742e10f9ba5c3737f6530b4c243132c2a2ccc8aa96fe25cd6" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "wayland-backend", "wayland-client", "wayland-protocols", - "wayland-scanner 0.31.1", -] - -[[package]] -name = "wayland-scanner" -version = "0.29.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4303d8fa22ab852f789e75a967f0a2cdc430a607751c0499bada3e451cbd53" -dependencies = [ - "proc-macro2", - "quote", - "xml-rs", + "wayland-scanner", ] [[package]] name = "wayland-scanner" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"63b3a62929287001986fb58c789dce9b67604a397c15c611ad9f747300b6c283" +checksum = "edf466fc49a4feb65a511ca403fec3601494d0dee85dbf37fff6fa0dd4eec3b6" dependencies = [ "proc-macro2", "quick-xml", @@ -6134,9 +5337,9 @@ dependencies = [ [[package]] name = "wayland-sys" -version = "0.31.1" +version = "0.31.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15a0c8eaff5216d07f226cb7a549159267f3467b289d9a2e52fd3ef5aae2b7af" +checksum = "4a6754825230fa5b27bafaa28c30b3c9e72c55530581220cef401fa422c0fae7" dependencies = [ "dlib", "log", @@ -6146,9 +5349,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.67" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58cd2333b6e0be7a39605f0e255892fd7418a682d8da8fe042fe25128794d2ed" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -6156,9 +5359,9 @@ dependencies = [ [[package]] name = "web-time" -version = "0.2.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -6166,17 +5369,18 @@ dependencies = [ [[package]] name = "webbrowser" -version = "0.8.12" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b2391658b02c27719fc5a0a73d6e696285138e8b12fba9d4baa70451023c71" +checksum = "425ba64c1e13b1c6e8c5d2541c8fac10022ca584f33da781db01b5756aef1f4e" dependencies = [ + "block2", "core-foundation", "home", - "jni 0.21.1", + "jni", "log", "ndk-context", - "objc", - "raw-window-handle 0.5.2", + "objc2", + "objc2-foundation", "url", "web-sys", ] @@ -6189,214 +5393,118 @@ checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" [[package]] name = "wgpu" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "752e44d3998ef35f71830dd1ad3da513e628e2e4d4aedb0ab580f850827a0b41" -dependencies = [ - "arrayvec", - "cfg-if", - "js-sys", - "log", - "naga 0.13.0", - "parking_lot", - "profiling", - "raw-window-handle 0.5.2", - "smallvec", - "static_assertions", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "wgpu-core 0.17.1", - "wgpu-hal 0.17.2", - "wgpu-types 0.17.0", -] - -[[package]] -name = "wgpu" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfe9a310dcf2e6b85f00c46059aaeaf4184caa8e29a1ecd4b7a704c3482332d" +checksum = "90e37c7b9921b75dfd26dd973fdcbce36f13dfa6e2dc82aece584e0ed48c355c" dependencies = [ "arrayvec", "cfg-if", - "cfg_aliases", + "cfg_aliases 0.1.1", + "document-features", "js-sys", "log", - "naga 0.19.0", + "naga", "parking_lot", "profiling", - "raw-window-handle 0.6.0", + "raw-window-handle", "smallvec", "static_assertions", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "wgpu-core 0.19.0", - "wgpu-hal 0.19.1", - "wgpu-types 0.19.0", -] - -[[package]] -name = "wgpu-core" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f8a44dd301a30ceeed3c27d8c0090433d3da04d7b2a4042738095a424d12ae7" -dependencies = [ - "arrayvec", - "bit-vec", - "bitflags 2.4.2", - "codespan-reporting", - "log", - "naga 0.13.0", - "parking_lot", - "profiling", - "raw-window-handle 0.5.2", - "rustc-hash", - "smallvec", - "thiserror", - "web-sys", - "wgpu-hal 
0.17.2", - "wgpu-types 0.17.0", + "wgpu-core", + "wgpu-hal", + "wgpu-types", ] [[package]] name = "wgpu-core" -version = "0.19.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b15e451d4060ada0d99a64df44e4d590213496da7c4f245572d51071e8e30ed" +checksum = "d50819ab545b867d8a454d1d756b90cd5f15da1f2943334ca314af10583c9d39" dependencies = [ "arrayvec", "bit-vec", - "bitflags 2.4.2", - "cfg_aliases", + "bitflags 2.6.0", + "cfg_aliases 0.1.1", "codespan-reporting", - "indexmap 2.2.3", + "document-features", + "indexmap", "log", - "naga 0.19.0", + "naga", "once_cell", "parking_lot", "profiling", - "raw-window-handle 0.6.0", - "rustc-hash", - "smallvec", - "thiserror", - "web-sys", - "wgpu-hal 0.19.1", - "wgpu-types 0.19.0", -] - -[[package]] -name = "wgpu-hal" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a80bf0e3c77399bb52850cb0830af9bad073d5cfcb9dd8253bef8125c42db17" -dependencies = [ - "android_system_properties", - "arrayvec", - "ash", - "bit-set", - "bitflags 2.4.2", - "block", - "core-graphics-types", - "d3d12 0.7.0", - "glow 0.12.3", - "gpu-alloc", - "gpu-allocator 0.22.0", - "gpu-descriptor", - "hassle-rs 0.10.0", - "js-sys", - "khronos-egl 4.1.0", - "libc", - "libloading 0.8.1", - "log", - "metal 0.26.0", - "naga 0.13.0", - "objc", - "parking_lot", - "profiling", - "range-alloc", - "raw-window-handle 0.5.2", - "renderdoc-sys", + "raw-window-handle", "rustc-hash", "smallvec", "thiserror", - "wasm-bindgen", "web-sys", - "wgpu-types 0.17.0", - "winapi", + "wgpu-hal", + "wgpu-types", ] [[package]] name = "wgpu-hal" -version = "0.19.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bb47856236bfafc0bc591a925eb036ac19cd987624a447ff353e7a7e7e6f72" +checksum = "172e490a87295564f3fcc0f165798d87386f6231b04d4548bca458cbbfd63222" dependencies = [ "android_system_properties", "arrayvec", "ash", "bit-set", - "bitflags 2.4.2", + "bitflags 2.6.0", "block", - "cfg_aliases", + "cfg_aliases 0.1.1", "core-graphics-types", - "d3d12 0.19.0", - "glow 0.13.1", + "d3d12", + "glow", "glutin_wgl_sys", "gpu-alloc", - "gpu-allocator 0.25.0", + "gpu-allocator", "gpu-descriptor", - "hassle-rs 0.11.0", + "hassle-rs", "js-sys", - "khronos-egl 6.0.0", + "khronos-egl", "libc", - "libloading 0.8.1", + "libloading 0.8.4", "log", - "metal 0.27.0", - "naga 0.19.0", + "metal", + "naga", + "ndk-sys 0.5.0+25.2.9519653", "objc", "once_cell", "parking_lot", "profiling", "range-alloc", - "raw-window-handle 0.6.0", + "raw-window-handle", "renderdoc-sys", "rustc-hash", "smallvec", "thiserror", "wasm-bindgen", "web-sys", - "wgpu-types 0.19.0", + "wgpu-types", "winapi", ] [[package]] name = "wgpu-types" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee64d7398d0c2f9ca48922c902ef69c42d000c759f3db41e355f4a570b052b67" -dependencies = [ - "bitflags 2.4.2", - "js-sys", - "web-sys", -] - -[[package]] -name = "wgpu-types" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "895fcbeb772bfb049eb80b2d6e47f6c9af235284e9703c96fc0218a42ffd5af2" +checksum = "1353d9a46bff7f955a680577f34c69122628cc2076e1d6f3a9be6ef00ae793ef" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "js-sys", "web-sys", ] [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -6416,11 +5524,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -6431,72 +5539,118 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.44.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e745dab35a0c4c77aa3ce42d595e13d2003d6902d6b08c9ef5fc326d08da12b" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-targets 0.42.2", + "windows-core 0.52.0", + "windows-targets 0.52.6", ] [[package]] name = "windows" -version = "0.46.0" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" dependencies = [ - "windows-targets 0.42.2", + "windows-core 0.54.0", + "windows-implement 0.53.0", + "windows-interface 0.53.0", + "windows-targets 0.52.6", ] [[package]] name = "windows" -version = "0.48.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-implement", - "windows-interface", - "windows-targets 0.48.5", + "windows-core 0.57.0", + "windows-targets 0.52.6", ] [[package]] -name = "windows" +name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-core", - "windows-targets 0.52.3", + "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.52.0" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" dependencies = [ - "windows-targets 0.52.3", + "windows-implement 0.57.0", + "windows-interface 0.57.0", + "windows-result", + "windows-targets 0.52.6", ] [[package]] name = "windows-implement" -version = "0.48.0" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e2ee588991b9e7e6c8338edf3333fbe4da35dc72092643958ebb43f0ab2c49c" +checksum = "942ac266be9249c84ca862f0a164a39533dc2f6f33dc98ec89c8da99b82ea0bd" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.68", +] + +[[package]] +name = 
"windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", ] [[package]] name = "windows-interface" -version = "0.48.0" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6fb8df20c9bcaa8ad6ab513f7b40104840c8867d5751126e4df3b08388d0cc7" +checksum = "da33557140a288fae4e1d5f8873aaf9eb6613a9cf82c3e070223ff177f598b60" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.68", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -6523,7 +5677,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.6", ] [[package]] @@ -6558,17 +5712,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.3" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.3", - "windows_aarch64_msvc 0.52.3", - "windows_i686_gnu 0.52.3", - "windows_i686_msvc 0.52.3", - "windows_x86_64_gnu 0.52.3", - "windows_x86_64_gnullvm 0.52.3", - "windows_x86_64_msvc 0.52.3", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -6585,9 +5740,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.3" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -6603,9 +5758,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.3" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -6621,9 +5776,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.3" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" 
+version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -6639,9 +5800,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.3" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -6657,9 +5818,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.3" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -6675,9 +5836,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.3" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -6693,73 +5854,47 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" - -[[package]] -name = "winit" -version = "0.28.7" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9596d90b45384f5281384ab204224876e8e8bf7d58366d9b795ad99aa9894b94" -dependencies = [ - "android-activity 0.4.3", - "bitflags 1.3.2", - "cfg_aliases", - "core-foundation", - "core-graphics 0.22.3", - "dispatch", - "instant", - "libc", - "log", - "mio", - "ndk 0.7.0", - "objc2 0.3.0-beta.3.patch-leaks.3", - "once_cell", - "orbclient", - "percent-encoding", - "raw-window-handle 0.5.2", - "redox_syscall 0.3.5", - "wasm-bindgen", - "wayland-scanner 0.29.5", - "web-sys", - "windows-sys 0.45.0", - "x11-dl", -] +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winit" -version = "0.29.10" +version = "0.30.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c824f11941eeae66ec71111cc2674373c772f482b58939bb4066b642aa2ffcf" +checksum = "49f45a7b7e2de6af35448d7718dab6d95acec466eb3bb7a56f4d31d1af754004" dependencies = [ "ahash", - "android-activity 0.5.2", + "android-activity", "atomic-waker", - "bitflags 2.4.2", + "bitflags 2.6.0", + "block2", "bytemuck", "calloop", - "cfg_aliases", + "cfg_aliases 0.2.1", + "concurrent-queue", "core-foundation", - "core-graphics 0.23.1", + "core-graphics", "cursor-icon", - "icrate", + "dpi", "js-sys", "libc", - "log", "memmap2", - "ndk 0.8.0", - "ndk-sys 0.5.0+25.2.9519653", - "objc2 0.4.1", - "once_cell", + "ndk 0.9.0", + "objc2", + "objc2-app-kit", + "objc2-foundation", + "objc2-ui-kit", "orbclient", "percent-encoding", - "raw-window-handle 0.6.0", - 
"redox_syscall 0.3.5", + "pin-project", + "raw-window-handle", + "redox_syscall 0.4.1", "rustix", "sctk-adwaita", "smithay-client-toolkit", "smol_str", + "tracing", "unicode-segmentation", "wasm-bindgen", "wasm-bindgen-futures", @@ -6769,7 +5904,7 @@ dependencies = [ "wayland-protocols-plasma", "web-sys", "web-time", - "windows-sys 0.48.0", + "windows-sys 0.52.0", "x11-dl", "x11rb", "xkbcommon-dl", @@ -6786,9 +5921,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.2" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178" +checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" dependencies = [ "memchr", ] @@ -6805,9 +5940,9 @@ dependencies = [ [[package]] name = "x11-clipboard" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613c2be3e772af2bbb57c5a94413675f5ec668bac00a71ada2ced28c420ef087" +checksum = "b98785a09322d7446e28a13203d2cae1059a0dd3dfb32cb06d0a225f023d8286" dependencies = [ "libc", "x11rb", @@ -6826,14 +5961,14 @@ dependencies = [ [[package]] name = "x11rb" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f25ead8c7e4cba123243a6367da5d3990e0d3affa708ea19dce96356bd9f1a" +checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" dependencies = [ "as-raw-xcb-connection", "gethostname", "libc", - "libloading 0.8.1", + "libloading 0.8.4", "once_cell", "rustix", "x11rb-protocol", @@ -6841,9 +5976,9 @@ dependencies = [ [[package]] name = "x11rb-protocol" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e63e71c4b8bd9ffec2c963173a4dc4cbde9ee96961d4fcb4429db9929b606c34" +checksum = "ec107c4503ea0b4a98ef47356329af139c0a4f7750e621cf2973cd3385ebcb3d" [[package]] name = "xcursor" @@ -6863,7 +5998,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d039de8032a9a8856a6be89cea3e5d12fdd82306ab7c94d74e6deab2460651c5" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.6.0", "dlib", "log", "once_cell", @@ -6872,36 +6007,42 @@ dependencies = [ [[package]] name = "xkeysym" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "054a8e68b76250b253f671d1268cb7f1ae089ec35e195b2efb2a4e9a836d0621" +checksum = "b9cc00251562a284751c9973bace760d86c0276c471b4be569fe6b068ee97a56" [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", 
+ "syn 2.0.68", ] +[[package]] +name = "zune-core" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" + [[package]] name = "zune-inflate" version = "0.2.54" @@ -6910,3 +6051,12 @@ checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" dependencies = [ "simd-adler32", ] + +[[package]] +name = "zune-jpeg" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec866b44a2a1fd6133d363f073ca1b179f438f99e7e5bfb1e33f7181facfe448" +dependencies = [ + "zune-core", +] diff --git a/Cargo.toml b/Cargo.toml old mode 100755 new mode 100644 index b70faca..9585a28 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "shadplay" -version = "0.0.1" +version = "0.0.2" resolver = "2" edition = "2021" authors = ["jeremy webb "] @@ -21,18 +21,22 @@ ui = [] [dependencies] anyhow = "1.0.79" -bevy = { version = "*", features = ["file_watcher", "dynamic_linking", "jpeg"] } -bevy_egui = "0.25.0" -bevy_panorbit_camera = "0.15.0" +bevy = { version = "0.14.0", features = ["file_watcher", "jpeg"] } +bevy_egui = { version = "0.28.*" } +bevy_panorbit_camera = { git = "https://github.com/kristoff3r/bevy_panorbit_camera" } #"0.18.3?" chrono = "0.4.34" copypasta = "0.10.0" directories = "5.0.1" -image = "0.24.9" +image = "0.25.1" serde = { version = "1.0.190", features = ["derive"] } toml = "0.8.10" [dev-dependencies] -bevy_editor_pls = "0.6.0" # Required for Might +bevy = { version = "0.14.0", features = [ + "file_watcher", + "jpeg", + "dynamic_linking", +] } [build-dependencies] reqwest = "0.11.*" diff --git a/README.md b/README.md index ad7f554..c2e69be 100755 --- a/README.md +++ b/README.md @@ -111,6 +111,13 @@ then: ______________________________________________________________________ +## Stability: + +The shaders in `assets/shaders/shadertoy-ports` _should_ usually work with the latest bevy. +With _other_ shaders, for example those prefaced with `BROKEN_...` your mileage will vary, I cannot update all the shaders every single time `wgsl/bevy/naga/wgpu` change stuff :(, if _you_ can than see below: + +______________________________________________________________________ + ## Contributing: See the [guide](./CONTRIBUTING.md) diff --git a/assets/shaders/electro_cube.wgsl b/assets/shaders/electro_cube.wgsl index 8b7166f..564abb4 100755 --- a/assets/shaders/electro_cube.wgsl +++ b/assets/shaders/electro_cube.wgsl @@ -1,5 +1,5 @@ #import bevy_pbr::mesh_view_bindings globals view -#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_vertex_output::VertexOutput #import bevy_pbr::utils PI HALF_PI #import bevy_pbr::mesh_functions diff --git a/assets/shaders/fast_dots.wgsl b/assets/shaders/fast_dots.wgsl index 0d099e1..7e655da 100755 --- a/assets/shaders/fast_dots.wgsl +++ b/assets/shaders/fast_dots.wgsl @@ -1,5 +1,5 @@ #import bevy_pbr::mesh_view_bindings globals -#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_vertex_output::VertexOutput @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { diff --git a/assets/shaders/howto-mouse.wgsl b/assets/shaders/howto-mouse.wgsl index bb67878..52624b3 100755 --- a/assets/shaders/howto-mouse.wgsl +++ b/assets/shaders/howto-mouse.wgsl @@ -1,5 +1,5 @@ /// How to use the mouse, in shadplay. 
-#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_vertex_output::VertexOutput #import bevy_sprite::mesh2d_view_bindings globals #import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D diff --git a/assets/shaders/myshader.wgsl b/assets/shaders/myshader.wgsl index e8f8158..932a701 100755 --- a/assets/shaders/myshader.wgsl +++ b/assets/shaders/myshader.wgsl @@ -7,10 +7,10 @@ #import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D #import bevy_render::view View -@group(0) @binding(0) var view: View; +// @group(0) @binding(0) var view: View; -@group(2) @binding(1) var texture: texture_2d; -@group(2) @binding(2) var texture_sampler: sampler; +@group(2) @binding(101) var texture: texture_2d; +@group(2) @binding(102) var texture_sampler: sampler; const SPEED:f32 = 1.0; diff --git a/assets/shaders/myshader_2d.wgsl b/assets/shaders/myshader_2d.wgsl index 111da79..dcc9696 100755 --- a/assets/shaders/myshader_2d.wgsl +++ b/assets/shaders/myshader_2d.wgsl @@ -6,16 +6,17 @@ #import bevy_sprite::mesh2d_view_bindings::globals #import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} #import bevy_render::view::View -#import bevy_pbr::forward_io::VertexOutput; +#import bevy_sprite::mesh2d_vertex_output::VertexOutput @group(0) @binding(0) var view: View; const SPEED:f32 = 1.0; @fragment -fn fragment(in: VertexOutput) -> @location(0) vec4 { +fn fragment(mesh: VertexOutput) -> @location(0) vec4 { // ensure our uv coords match shadertoy/the-lil-book-of-shaders - var uv = (in.uv * 2.0) - 1.0; + var uv = mesh.uv; + uv = (uv * 2.0) - 1.0; let resolution = view.viewport.zw; let t = globals.time * SPEED; uv.x *= resolution.x / resolution.y; diff --git a/assets/shaders/shadertoy-ports/cyber-anim-arrowX.wgsl b/assets/shaders/shadertoy-ports/cyber-anim-arrowX.wgsl index e56082a..80a1f86 100755 --- a/assets/shaders/shadertoy-ports/cyber-anim-arrowX.wgsl +++ b/assets/shaders/shadertoy-ports/cyber-anim-arrowX.wgsl @@ -3,7 +3,7 @@ /// Source: https://www.shadertoy.com/view/DsjfDt /// Authour: https://www.shadertoy.com/user/float1987 /// -#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_vertex_output::VertexOutput #import bevy_sprite::mesh2d_view_bindings globals #import shadplay::shader_utils::common rotate2D, QUARTER_PI diff --git a/assets/shaders/shadertoy-ports/polar-coords-experiments.wgsl b/assets/shaders/shadertoy-ports/polar-coords-experiments.wgsl index a366f35..57b7e35 100755 --- a/assets/shaders/shadertoy-ports/polar-coords-experiments.wgsl +++ b/assets/shaders/shadertoy-ports/polar-coords-experiments.wgsl @@ -1,5 +1,5 @@ //! This is a shadertoy port of 'polar-coordinates-experiments' by toridango https://www.shadertoy.com/view/ttsGz8 -#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_vertex_output::VertexOutput #import bevy_pbr::utils PI #import bevy_sprite::mesh2d_view_bindings globals diff --git a/assets/shaders/shadertoy-ports/water-caustic-tileable.wgsl b/assets/shaders/shadertoy-ports/water-caustic-tileable.wgsl index e738b7a..45dbdd3 100755 --- a/assets/shaders/shadertoy-ports/water-caustic-tileable.wgsl +++ b/assets/shaders/shadertoy-ports/water-caustic-tileable.wgsl @@ -3,7 +3,7 @@ /// I have been unable to find the original. 
/// ***************************** /// -#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_vertex_output::VertexOutput #import bevy_sprite::mesh2d_view_bindings globals #import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D, TAU diff --git a/bevy-shader-book.md b/bevy-shader-book.md old mode 100755 new mode 100644 index e6bc1e2..40ade1a --- a/bevy-shader-book.md +++ b/bevy-shader-book.md @@ -4,3136 +4,28482 @@ This document is really to give you an easy, one-stop-shop to reference all the ## Table of Contents -- [crates/bevy_gizmos/src/lines](#crates/bevy_gizmos/src/lines) -- [crates/bevy_gizmos/src/line_joints](#crates/bevy_gizmos/src/line_joints) -- [crates/bevy_sprite/src/render/sprite](#crates/bevy_sprite/src/render/sprite) -- [crates/bevy_sprite/src/mesh2d/mesh2d_functions](#crates/bevy_sprite/src/mesh2d/mesh2d_functions) -- [crates/bevy_sprite/src/mesh2d/mesh2d](#crates/bevy_sprite/src/mesh2d/mesh2d) -- [crates/bevy_sprite/src/mesh2d/color_material](#crates/bevy_sprite/src/mesh2d/color_material) -- [crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings](#crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings) -- [crates/bevy_sprite/src/mesh2d/mesh2d_types](#crates/bevy_sprite/src/mesh2d/mesh2d_types) -- [crates/bevy_sprite/src/mesh2d/mesh2d_view_types](#crates/bevy_sprite/src/mesh2d/mesh2d_view_types) -- [crates/bevy_sprite/src/mesh2d/mesh2d_vertex_output](#crates/bevy_sprite/src/mesh2d/mesh2d_vertex_output) -- [crates/bevy_sprite/src/mesh2d/mesh2d_bindings](#crates/bevy_sprite/src/mesh2d/mesh2d_bindings) -- [crates/bevy_sprite/src/mesh2d/wireframe2d](#crates/bevy_sprite/src/mesh2d/wireframe2d) -- [crates/bevy_ui/src/render/ui](#crates/bevy_ui/src/render/ui) -- [crates/bevy_ui/src/render/ui_material](#crates/bevy_ui/src/render/ui_material) -- [crates/bevy_ui/src/render/ui_vertex_output](#crates/bevy_ui/src/render/ui_vertex_output) -- [crates/bevy_core_pipeline/src/bloom/bloom](#crates/bevy_core_pipeline/src/bloom/bloom) -- [crates/bevy_core_pipeline/src/fxaa/fxaa](#crates/bevy_core_pipeline/src/fxaa/fxaa) -- [crates/bevy_core_pipeline/src/skybox/skybox](#crates/bevy_core_pipeline/src/skybox/skybox) -- [crates/bevy_core_pipeline/src/taa/taa](#crates/bevy_core_pipeline/src/taa/taa) -- [crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared](#crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared) -- [crates/bevy_core_pipeline/src/tonemapping/tonemapping](#crates/bevy_core_pipeline/src/tonemapping/tonemapping) -- [crates/bevy_core_pipeline/src/fullscreen_vertex_shader/fullscreen](#crates/bevy_core_pipeline/src/fullscreen_vertex_shader/fullscreen) -- [crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening](#crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening) -- [crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id](#crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id) -- [crates/bevy_core_pipeline/src/blit/blit](#crates/bevy_core_pipeline/src/blit/blit) -- [crates/bevy_render/src/globals](#crates/bevy_render/src/globals) -- [crates/bevy_render/src/maths](#crates/bevy_render/src/maths) -- [crates/bevy_render/src/view/view](#crates/bevy_render/src/view/view) -- [crates/bevy_render/src/view/window/screenshot](#crates/bevy_render/src/view/window/screenshot) -- [crates/bevy_pbr/src/render/pbr_functions](#crates/bevy_pbr/src/render/pbr_functions) -- [crates/bevy_pbr/src/render/pbr](#crates/bevy_pbr/src/render/pbr) -- 
[crates/bevy_pbr/src/render/pbr_lighting](#crates/bevy_pbr/src/render/pbr_lighting) -- [crates/bevy_pbr/src/render/wireframe](#crates/bevy_pbr/src/render/wireframe) -- [crates/bevy_pbr/src/render/mesh_preprocess](#crates/bevy_pbr/src/render/mesh_preprocess) -- [crates/bevy_pbr/src/render/pbr_prepass](#crates/bevy_pbr/src/render/pbr_prepass) -- [crates/bevy_pbr/src/render/parallax_mapping](#crates/bevy_pbr/src/render/parallax_mapping) -- [crates/bevy_pbr/src/render/pbr_fragment](#crates/bevy_pbr/src/render/pbr_fragment) -- [crates/bevy_pbr/src/render/utils](#crates/bevy_pbr/src/render/utils) -- [crates/bevy_pbr/src/render/mesh_types](#crates/bevy_pbr/src/render/mesh_types) -- [crates/bevy_pbr/src/render/shadows](#crates/bevy_pbr/src/render/shadows) -- [crates/bevy_pbr/src/render/fog](#crates/bevy_pbr/src/render/fog) -- [crates/bevy_pbr/src/render/pbr_bindings](#crates/bevy_pbr/src/render/pbr_bindings) -- [crates/bevy_pbr/src/render/pbr_types](#crates/bevy_pbr/src/render/pbr_types) -- [crates/bevy_pbr/src/render/mesh](#crates/bevy_pbr/src/render/mesh) -- [crates/bevy_pbr/src/render/pbr_ambient](#crates/bevy_pbr/src/render/pbr_ambient) -- [crates/bevy_pbr/src/render/mesh_bindings](#crates/bevy_pbr/src/render/mesh_bindings) -- [crates/bevy_pbr/src/render/pbr_prepass_functions](#crates/bevy_pbr/src/render/pbr_prepass_functions) -- [crates/bevy_pbr/src/render/shadow_sampling](#crates/bevy_pbr/src/render/shadow_sampling) -- [crates/bevy_pbr/src/render/mesh_view_types](#crates/bevy_pbr/src/render/mesh_view_types) -- [crates/bevy_pbr/src/render/forward_io](#crates/bevy_pbr/src/render/forward_io) -- [crates/bevy_pbr/src/render/rgb9e5](#crates/bevy_pbr/src/render/rgb9e5) -- [crates/bevy_pbr/src/render/morph](#crates/bevy_pbr/src/render/morph) -- [crates/bevy_pbr/src/render/clustered_forward](#crates/bevy_pbr/src/render/clustered_forward) -- [crates/bevy_pbr/src/render/mesh_functions](#crates/bevy_pbr/src/render/mesh_functions) -- [crates/bevy_pbr/src/render/pbr_transmission](#crates/bevy_pbr/src/render/pbr_transmission) -- [crates/bevy_pbr/src/render/mesh_view_bindings](#crates/bevy_pbr/src/render/mesh_view_bindings) -- [crates/bevy_pbr/src/render/view_transformations](#crates/bevy_pbr/src/render/view_transformations) -- [crates/bevy_pbr/src/render/skinning](#crates/bevy_pbr/src/render/skinning) -- [crates/bevy_pbr/src/prepass/prepass_utils](#crates/bevy_pbr/src/prepass/prepass_utils) -- [crates/bevy_pbr/src/prepass/prepass_bindings](#crates/bevy_pbr/src/prepass/prepass_bindings) -- [crates/bevy_pbr/src/prepass/prepass](#crates/bevy_pbr/src/prepass/prepass) -- [crates/bevy_pbr/src/prepass/prepass_io](#crates/bevy_pbr/src/prepass/prepass_io) -- [crates/bevy_pbr/src/meshlet/copy_material_depth](#crates/bevy_pbr/src/meshlet/copy_material_depth) -- [crates/bevy_pbr/src/meshlet/write_index_buffer](#crates/bevy_pbr/src/meshlet/write_index_buffer) -- [crates/bevy_pbr/src/meshlet/meshlet_bindings](#crates/bevy_pbr/src/meshlet/meshlet_bindings) -- [crates/bevy_pbr/src/meshlet/downsample_depth](#crates/bevy_pbr/src/meshlet/downsample_depth) -- [crates/bevy_pbr/src/meshlet/cull_meshlets](#crates/bevy_pbr/src/meshlet/cull_meshlets) -- [crates/bevy_pbr/src/meshlet/meshlet_mesh_material](#crates/bevy_pbr/src/meshlet/meshlet_mesh_material) -- [crates/bevy_pbr/src/meshlet/visibility_buffer_resolve](#crates/bevy_pbr/src/meshlet/visibility_buffer_resolve) -- [crates/bevy_pbr/src/meshlet/dummy_visibility_buffer_resolve](#crates/bevy_pbr/src/meshlet/dummy_visibility_buffer_resolve) -- 
[crates/bevy_pbr/src/meshlet/visibility_buffer_raster](#crates/bevy_pbr/src/meshlet/visibility_buffer_raster) -- [crates/bevy_pbr/src/light_probe/environment_map](#crates/bevy_pbr/src/light_probe/environment_map) -- [crates/bevy_pbr/src/light_probe/light_probe](#crates/bevy_pbr/src/light_probe/light_probe) -- [crates/bevy_pbr/src/light_probe/irradiance_volume](#crates/bevy_pbr/src/light_probe/irradiance_volume) -- [crates/bevy_pbr/src/lightmap/lightmap](#crates/bevy_pbr/src/lightmap/lightmap) -- [crates/bevy_pbr/src/ssao/spatial_denoise](#crates/bevy_pbr/src/ssao/spatial_denoise) -- [crates/bevy_pbr/src/ssao/gtao](#crates/bevy_pbr/src/ssao/gtao) -- [crates/bevy_pbr/src/ssao/gtao_utils](#crates/bevy_pbr/src/ssao/gtao_utils) -- [crates/bevy_pbr/src/ssao/preprocess_depth](#crates/bevy_pbr/src/ssao/preprocess_depth) -- [crates/bevy_pbr/src/deferred/deferred_lighting](#crates/bevy_pbr/src/deferred/deferred_lighting) -- [crates/bevy_pbr/src/deferred/pbr_deferred_functions](#crates/bevy_pbr/src/deferred/pbr_deferred_functions) -- [crates/bevy_pbr/src/deferred/pbr_deferred_types](#crates/bevy_pbr/src/deferred/pbr_deferred_types) -- [assets/shaders/line_material](#assets/shaders/line_material) -- [assets/shaders/instancing](#assets/shaders/instancing) -- [assets/shaders/gpu_readback](#assets/shaders/gpu_readback) -- [assets/shaders/shader_defs](#assets/shaders/shader_defs) -- [assets/shaders/custom_material_import](#assets/shaders/custom_material_import) -- [assets/shaders/extended_material](#assets/shaders/extended_material) -- [assets/shaders/custom_gltf_2d](#assets/shaders/custom_gltf_2d) -- [assets/shaders/custom_vertex_attribute](#assets/shaders/custom_vertex_attribute) -- [assets/shaders/texture_binding_array](#assets/shaders/texture_binding_array) -- [assets/shaders/circle_shader](#assets/shaders/circle_shader) -- [assets/shaders/tonemapping_test_patterns](#assets/shaders/tonemapping_test_patterns) -- [assets/shaders/custom_material_screenspace_texture](#assets/shaders/custom_material_screenspace_texture) -- [assets/shaders/custom_material_2d](#assets/shaders/custom_material_2d) -- [assets/shaders/cubemap_unlit](#assets/shaders/cubemap_unlit) -- [assets/shaders/fallback_image_test](#assets/shaders/fallback_image_test) -- [assets/shaders/custom_material](#assets/shaders/custom_material) -- [assets/shaders/array_texture](#assets/shaders/array_texture) -- [assets/shaders/show_prepass](#assets/shaders/show_prepass) -- [assets/shaders/irradiance_volume_voxel_visualization](#assets/shaders/irradiance_volume_voxel_visualization) -- [assets/shaders/game_of_life](#assets/shaders/game_of_life) -- [assets/shaders/animate_shader](#assets/shaders/animate_shader) -- [assets/shaders/post_processing](#assets/shaders/post_processing) - -### crates/bevy_gizmos/src/lines +- [all_wgsl](#all_wgsl) +- [assets/shaders/myshader](#assets/shaders/myshader) +- [assets/shaders/fast_dots](#assets/shaders/fast_dots) +- [assets/shaders/howto-texture](#assets/shaders/howto-texture) +- [assets/shaders/electro_cube](#assets/shaders/electro_cube) +- [assets/shaders/four_to_the_floor](#assets/shaders/four_to_the_floor) +- [assets/shaders/WIP-black-hole](#assets/shaders/WIP-black-hole) +- [assets/shaders/WIP-waterPool](#assets/shaders/WIP-waterPool) +- [assets/shaders/indexing_into_vec_with_loop](#assets/shaders/indexing_into_vec_with_loop) +- [assets/shaders/rain_generator](#assets/shaders/rain_generator) +- [assets/shaders/WIP-total_noob](#assets/shaders/WIP-total_noob) +- 
[assets/shaders/grid_with_colours](#assets/shaders/grid_with_colours) +- [assets/shaders/aura](#assets/shaders/aura) +- [assets/shaders/myshader_2d](#assets/shaders/myshader_2d) +- [assets/shaders/howto-mouse](#assets/shaders/howto-mouse) +- [assets/shaders/common/notes](#assets/shaders/common/notes) +- [assets/shaders/shadertoy-ports/light-spirals](#assets/shaders/shadertoy-ports/light-spirals) +- [assets/shaders/shadertoy-ports/cyber-anim-arrowX](#assets/shaders/shadertoy-ports/cyber-anim-arrowX) +- [assets/shaders/shadertoy-ports/cosmic](#assets/shaders/shadertoy-ports/cosmic) +- [assets/shaders/shadertoy-ports/voronoi_simple](#assets/shaders/shadertoy-ports/voronoi_simple) +- [assets/shaders/shadertoy-ports/flame](#assets/shaders/shadertoy-ports/flame) +- [assets/shaders/shadertoy-ports/warp](#assets/shaders/shadertoy-ports/warp) +- [assets/shaders/shadertoy-ports/BROKEN_fbm_lightning](#assets/shaders/shadertoy-ports/BROKEN_fbm_lightning) +- [assets/shaders/shadertoy-ports/universe_within](#assets/shaders/shadertoy-ports/universe_within) +- [assets/shaders/shadertoy-ports/kishimisu](#assets/shaders/shadertoy-ports/kishimisu) +- [assets/shaders/shadertoy-ports/semi-circle-wave](#assets/shaders/shadertoy-ports/semi-circle-wave) +- [assets/shaders/shadertoy-ports/star](#assets/shaders/shadertoy-ports/star) +- [assets/shaders/shadertoy-ports/water-caustic-tileable](#assets/shaders/shadertoy-ports/water-caustic-tileable) +- [assets/shaders/shadertoy-ports/sailing-beyond](#assets/shaders/shadertoy-ports/sailing-beyond) +- [assets/shaders/shadertoy-ports/BROKEN_tuesday_tinkering](#assets/shaders/shadertoy-ports/BROKEN_tuesday_tinkering) +- [assets/shaders/shadertoy-ports/discoteq2](#assets/shaders/shadertoy-ports/discoteq2) +- [assets/shaders/shadertoy-ports/polar-coords-experiments](#assets/shaders/shadertoy-ports/polar-coords-experiments) +- [assets/shaders/shadertoy-ports/shadertoy](#assets/shaders/shadertoy-ports/shadertoy) +- [assets/shaders/shadertoy-ports/octagon_fun](#assets/shaders/shadertoy-ports/octagon_fun) +- [assets/shaders/shadertoy-ports/w10](#assets/shaders/shadertoy-ports/w10) +- [assets/Gallery/perlin-waves/perlin-waves](#assets/Gallery/perlin-waves/perlin-waves) +- [assets/Gallery/lines/dotted_line](#assets/Gallery/lines/dotted_line) +- [assets/Gallery/fbmCloud/fmb_cloud](#assets/Gallery/fbmCloud/fmb_cloud) +- [bevy/assets/shaders/custom_ui_material](#bevy/assets/shaders/custom_ui_material) +- [bevy/assets/shaders/array_texture](#bevy/assets/shaders/array_texture) +- [bevy/assets/shaders/texture_binding_array](#bevy/assets/shaders/texture_binding_array) +- [bevy/assets/shaders/cubemap_unlit](#bevy/assets/shaders/cubemap_unlit) +- [bevy/assets/shaders/custom_material](#bevy/assets/shaders/custom_material) +- [bevy/assets/shaders/post_processing](#bevy/assets/shaders/post_processing) +- [bevy/assets/shaders/gpu_readback](#bevy/assets/shaders/gpu_readback) +- [bevy/assets/shaders/custom_material_2d](#bevy/assets/shaders/custom_material_2d) +- [bevy/assets/shaders/shader_defs](#bevy/assets/shaders/shader_defs) +- [bevy/assets/shaders/line_material](#bevy/assets/shaders/line_material) +- [bevy/assets/shaders/animate_shader](#bevy/assets/shaders/animate_shader) +- [bevy/assets/shaders/extended_material](#bevy/assets/shaders/extended_material) +- [bevy/assets/shaders/custom_vertex_attribute](#bevy/assets/shaders/custom_vertex_attribute) +- [bevy/assets/shaders/show_prepass](#bevy/assets/shaders/show_prepass) +- 
[bevy/assets/shaders/water_material](#bevy/assets/shaders/water_material) +- [bevy/assets/shaders/custom_material_import](#bevy/assets/shaders/custom_material_import) +- [bevy/assets/shaders/game_of_life](#bevy/assets/shaders/game_of_life) +- [bevy/assets/shaders/fallback_image_test](#bevy/assets/shaders/fallback_image_test) +- [bevy/assets/shaders/irradiance_volume_voxel_visualization](#bevy/assets/shaders/irradiance_volume_voxel_visualization) +- [bevy/assets/shaders/custom_phase_item](#bevy/assets/shaders/custom_phase_item) +- [bevy/assets/shaders/instancing](#bevy/assets/shaders/instancing) +- [bevy/assets/shaders/tonemapping_test_patterns](#bevy/assets/shaders/tonemapping_test_patterns) +- [bevy/assets/shaders/custom_gltf_2d](#bevy/assets/shaders/custom_gltf_2d) +- [bevy/assets/shaders/custom_material_screenspace_texture](#bevy/assets/shaders/custom_material_screenspace_texture) +- [bevy/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id](#bevy/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id) +- [bevy/crates/bevy_core_pipeline/src/motion_blur/motion_blur](#bevy/crates/bevy_core_pipeline/src/motion_blur/motion_blur) +- [bevy/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared](#bevy/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared) +- [bevy/crates/bevy_core_pipeline/src/tonemapping/lut_bindings](#bevy/crates/bevy_core_pipeline/src/tonemapping/lut_bindings) +- [bevy/crates/bevy_core_pipeline/src/tonemapping/tonemapping](#bevy/crates/bevy_core_pipeline/src/tonemapping/tonemapping) +- [bevy/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/fullscreen](#bevy/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/fullscreen) +- [bevy/crates/bevy_core_pipeline/src/smaa/smaa](#bevy/crates/bevy_core_pipeline/src/smaa/smaa) +- [bevy/crates/bevy_core_pipeline/src/auto_exposure/auto_exposure](#bevy/crates/bevy_core_pipeline/src/auto_exposure/auto_exposure) +- [bevy/crates/bevy_core_pipeline/src/skybox/skybox_prepass](#bevy/crates/bevy_core_pipeline/src/skybox/skybox_prepass) +- [bevy/crates/bevy_core_pipeline/src/skybox/skybox](#bevy/crates/bevy_core_pipeline/src/skybox/skybox) +- [bevy/crates/bevy_core_pipeline/src/taa/taa](#bevy/crates/bevy_core_pipeline/src/taa/taa) +- [bevy/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening](#bevy/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening) +- [bevy/crates/bevy_core_pipeline/src/blit/blit](#bevy/crates/bevy_core_pipeline/src/blit/blit) +- [bevy/crates/bevy_core_pipeline/src/dof/dof](#bevy/crates/bevy_core_pipeline/src/dof/dof) +- [bevy/crates/bevy_core_pipeline/src/bloom/bloom](#bevy/crates/bevy_core_pipeline/src/bloom/bloom) +- [bevy/crates/bevy_core_pipeline/src/fxaa/fxaa](#bevy/crates/bevy_core_pipeline/src/fxaa/fxaa) +- [bevy/crates/bevy_ui/src/render/ui](#bevy/crates/bevy_ui/src/render/ui) +- [bevy/crates/bevy_ui/src/render/ui_material](#bevy/crates/bevy_ui/src/render/ui_material) +- [bevy/crates/bevy_ui/src/render/ui_vertex_output](#bevy/crates/bevy_ui/src/render/ui_vertex_output) +- [bevy/crates/bevy_pbr/src/deferred/deferred_lighting](#bevy/crates/bevy_pbr/src/deferred/deferred_lighting) +- [bevy/crates/bevy_pbr/src/deferred/pbr_deferred_types](#bevy/crates/bevy_pbr/src/deferred/pbr_deferred_types) +- [bevy/crates/bevy_pbr/src/deferred/pbr_deferred_functions](#bevy/crates/bevy_pbr/src/deferred/pbr_deferred_functions) +- 
[bevy/crates/bevy_pbr/src/render/pbr_functions](#bevy/crates/bevy_pbr/src/render/pbr_functions) +- [bevy/crates/bevy_pbr/src/render/morph](#bevy/crates/bevy_pbr/src/render/morph) +- [bevy/crates/bevy_pbr/src/render/shadow_sampling](#bevy/crates/bevy_pbr/src/render/shadow_sampling) +- [bevy/crates/bevy_pbr/src/render/pbr_bindings](#bevy/crates/bevy_pbr/src/render/pbr_bindings) +- [bevy/crates/bevy_pbr/src/render/view_transformations](#bevy/crates/bevy_pbr/src/render/view_transformations) +- [bevy/crates/bevy_pbr/src/render/mesh_functions](#bevy/crates/bevy_pbr/src/render/mesh_functions) +- [bevy/crates/bevy_pbr/src/render/skinning](#bevy/crates/bevy_pbr/src/render/skinning) +- [bevy/crates/bevy_pbr/src/render/pbr_types](#bevy/crates/bevy_pbr/src/render/pbr_types) +- [bevy/crates/bevy_pbr/src/render/utils](#bevy/crates/bevy_pbr/src/render/utils) +- [bevy/crates/bevy_pbr/src/render/mesh_types](#bevy/crates/bevy_pbr/src/render/mesh_types) +- [bevy/crates/bevy_pbr/src/render/pbr_lighting](#bevy/crates/bevy_pbr/src/render/pbr_lighting) +- [bevy/crates/bevy_pbr/src/render/pbr](#bevy/crates/bevy_pbr/src/render/pbr) +- [bevy/crates/bevy_pbr/src/render/parallax_mapping](#bevy/crates/bevy_pbr/src/render/parallax_mapping) +- [bevy/crates/bevy_pbr/src/render/fog](#bevy/crates/bevy_pbr/src/render/fog) +- [bevy/crates/bevy_pbr/src/render/pbr_prepass](#bevy/crates/bevy_pbr/src/render/pbr_prepass) +- [bevy/crates/bevy_pbr/src/render/mesh_preprocess](#bevy/crates/bevy_pbr/src/render/mesh_preprocess) +- [bevy/crates/bevy_pbr/src/render/pbr_fragment](#bevy/crates/bevy_pbr/src/render/pbr_fragment) +- [bevy/crates/bevy_pbr/src/render/forward_io](#bevy/crates/bevy_pbr/src/render/forward_io) +- [bevy/crates/bevy_pbr/src/render/mesh_view_types](#bevy/crates/bevy_pbr/src/render/mesh_view_types) +- [bevy/crates/bevy_pbr/src/render/pbr_ambient](#bevy/crates/bevy_pbr/src/render/pbr_ambient) +- [bevy/crates/bevy_pbr/src/render/mesh](#bevy/crates/bevy_pbr/src/render/mesh) +- [bevy/crates/bevy_pbr/src/render/wireframe](#bevy/crates/bevy_pbr/src/render/wireframe) +- [bevy/crates/bevy_pbr/src/render/pbr_transmission](#bevy/crates/bevy_pbr/src/render/pbr_transmission) +- [bevy/crates/bevy_pbr/src/render/rgb9e5](#bevy/crates/bevy_pbr/src/render/rgb9e5) +- [bevy/crates/bevy_pbr/src/render/clustered_forward](#bevy/crates/bevy_pbr/src/render/clustered_forward) +- [bevy/crates/bevy_pbr/src/render/mesh_view_bindings](#bevy/crates/bevy_pbr/src/render/mesh_view_bindings) +- [bevy/crates/bevy_pbr/src/render/pbr_prepass_functions](#bevy/crates/bevy_pbr/src/render/pbr_prepass_functions) +- [bevy/crates/bevy_pbr/src/render/mesh_bindings](#bevy/crates/bevy_pbr/src/render/mesh_bindings) +- [bevy/crates/bevy_pbr/src/render/shadows](#bevy/crates/bevy_pbr/src/render/shadows) +- [bevy/crates/bevy_pbr/src/meshlet/cull_clusters](#bevy/crates/bevy_pbr/src/meshlet/cull_clusters) +- [bevy/crates/bevy_pbr/src/meshlet/fill_cluster_buffers](#bevy/crates/bevy_pbr/src/meshlet/fill_cluster_buffers) +- [bevy/crates/bevy_pbr/src/meshlet/visibility_buffer_raster](#bevy/crates/bevy_pbr/src/meshlet/visibility_buffer_raster) +- [bevy/crates/bevy_pbr/src/meshlet/meshlet_mesh_material](#bevy/crates/bevy_pbr/src/meshlet/meshlet_mesh_material) +- [bevy/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve](#bevy/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve) +- [bevy/crates/bevy_pbr/src/meshlet/meshlet_bindings](#bevy/crates/bevy_pbr/src/meshlet/meshlet_bindings) +- 
[bevy/crates/bevy_pbr/src/meshlet/copy_material_depth](#bevy/crates/bevy_pbr/src/meshlet/copy_material_depth) +- [bevy/crates/bevy_pbr/src/meshlet/dummy_visibility_buffer_resolve](#bevy/crates/bevy_pbr/src/meshlet/dummy_visibility_buffer_resolve) +- [bevy/crates/bevy_pbr/src/meshlet/downsample_depth](#bevy/crates/bevy_pbr/src/meshlet/downsample_depth) +- [bevy/crates/bevy_pbr/src/light_probe/environment_map](#bevy/crates/bevy_pbr/src/light_probe/environment_map) +- [bevy/crates/bevy_pbr/src/light_probe/irradiance_volume](#bevy/crates/bevy_pbr/src/light_probe/irradiance_volume) +- [bevy/crates/bevy_pbr/src/light_probe/light_probe](#bevy/crates/bevy_pbr/src/light_probe/light_probe) +- [bevy/crates/bevy_pbr/src/lightmap/lightmap](#bevy/crates/bevy_pbr/src/lightmap/lightmap) +- [bevy/crates/bevy_pbr/src/volumetric_fog/volumetric_fog](#bevy/crates/bevy_pbr/src/volumetric_fog/volumetric_fog) +- [bevy/crates/bevy_pbr/src/ssr/raymarch](#bevy/crates/bevy_pbr/src/ssr/raymarch) +- [bevy/crates/bevy_pbr/src/ssr/ssr](#bevy/crates/bevy_pbr/src/ssr/ssr) +- [bevy/crates/bevy_pbr/src/prepass/prepass_utils](#bevy/crates/bevy_pbr/src/prepass/prepass_utils) +- [bevy/crates/bevy_pbr/src/prepass/prepass_bindings](#bevy/crates/bevy_pbr/src/prepass/prepass_bindings) +- [bevy/crates/bevy_pbr/src/prepass/prepass_io](#bevy/crates/bevy_pbr/src/prepass/prepass_io) +- [bevy/crates/bevy_pbr/src/prepass/prepass](#bevy/crates/bevy_pbr/src/prepass/prepass) +- [bevy/crates/bevy_pbr/src/ssao/gtao](#bevy/crates/bevy_pbr/src/ssao/gtao) +- [bevy/crates/bevy_pbr/src/ssao/gtao_utils](#bevy/crates/bevy_pbr/src/ssao/gtao_utils) +- [bevy/crates/bevy_pbr/src/ssao/spatial_denoise](#bevy/crates/bevy_pbr/src/ssao/spatial_denoise) +- [bevy/crates/bevy_pbr/src/ssao/preprocess_depth](#bevy/crates/bevy_pbr/src/ssao/preprocess_depth) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d_view_types](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d_view_types) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d_types](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d_types) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d_bindings](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d_bindings) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d_vertex_output](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d_vertex_output) +- [bevy/crates/bevy_sprite/src/mesh2d/wireframe2d](#bevy/crates/bevy_sprite/src/mesh2d/wireframe2d) +- [bevy/crates/bevy_sprite/src/mesh2d/color_material](#bevy/crates/bevy_sprite/src/mesh2d/color_material) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d) +- [bevy/crates/bevy_sprite/src/mesh2d/mesh2d_functions](#bevy/crates/bevy_sprite/src/mesh2d/mesh2d_functions) +- [bevy/crates/bevy_sprite/src/render/sprite](#bevy/crates/bevy_sprite/src/render/sprite) +- [bevy/crates/bevy_sprite/src/render/sprite_view_bindings](#bevy/crates/bevy_sprite/src/render/sprite_view_bindings) +- [bevy/crates/bevy_gizmos/src/line_joints](#bevy/crates/bevy_gizmos/src/line_joints) +- [bevy/crates/bevy_gizmos/src/lines](#bevy/crates/bevy_gizmos/src/lines) +- [bevy/crates/bevy_render/src/globals](#bevy/crates/bevy_render/src/globals) +- [bevy/crates/bevy_render/src/maths](#bevy/crates/bevy_render/src/maths) +- [bevy/crates/bevy_render/src/color_operations](#bevy/crates/bevy_render/src/color_operations) +- [bevy/crates/bevy_render/src/view/view](#bevy/crates/bevy_render/src/view/view) +- 
[bevy/crates/bevy_render/src/view/window/screenshot](#bevy/crates/bevy_render/src/view/window/screenshot) +- [bevy_shaders/cull_clusters](#bevy_shaders/cull_clusters) +- [bevy_shaders/smaa](#bevy_shaders/smaa) +- [bevy_shaders/sprite](#bevy_shaders/sprite) +- [bevy_shaders/taa](#bevy_shaders/taa) +- [bevy_shaders/pbr_functions](#bevy_shaders/pbr_functions) +- [bevy_shaders/environment_map](#bevy_shaders/environment_map) +- [bevy_shaders/morph](#bevy_shaders/morph) +- [bevy_shaders/gtao](#bevy_shaders/gtao) +- [bevy_shaders/shadow_sampling](#bevy_shaders/shadow_sampling) +- [bevy_shaders/custom_ui_material](#bevy_shaders/custom_ui_material) +- [bevy_shaders/ui](#bevy_shaders/ui) +- [bevy_shaders/mesh2d_view_types](#bevy_shaders/mesh2d_view_types) +- [bevy_shaders/mesh2d_types](#bevy_shaders/mesh2d_types) +- [bevy_shaders/array_texture](#bevy_shaders/array_texture) +- [bevy_shaders/fullscreen](#bevy_shaders/fullscreen) +- [bevy_shaders/view](#bevy_shaders/view) +- [bevy_shaders/pbr_bindings](#bevy_shaders/pbr_bindings) +- [bevy_shaders/texture_binding_array](#bevy_shaders/texture_binding_array) +- [bevy_shaders/fill_cluster_buffers](#bevy_shaders/fill_cluster_buffers) +- [bevy_shaders/blit](#bevy_shaders/blit) +- [bevy_shaders/fxaa](#bevy_shaders/fxaa) +- [bevy_shaders/view_transformations](#bevy_shaders/view_transformations) +- [bevy_shaders/raymarch](#bevy_shaders/raymarch) +- [bevy_shaders/motion_blur](#bevy_shaders/motion_blur) +- [bevy_shaders/ui_material](#bevy_shaders/ui_material) +- [bevy_shaders/cubemap_unlit](#bevy_shaders/cubemap_unlit) +- [bevy_shaders/mesh_functions](#bevy_shaders/mesh_functions) +- [bevy_shaders/lightmap](#bevy_shaders/lightmap) +- [bevy_shaders/custom_material](#bevy_shaders/custom_material) +- [bevy_shaders/line_joints](#bevy_shaders/line_joints) +- [bevy_shaders/irradiance_volume](#bevy_shaders/irradiance_volume) +- [bevy_shaders/mesh_vertex_output](#bevy_shaders/mesh_vertex_output) +- [bevy_shaders/skybox_prepass](#bevy_shaders/skybox_prepass) +- [bevy_shaders/deferred_lighting](#bevy_shaders/deferred_lighting) +- [bevy_shaders/skinning](#bevy_shaders/skinning) +- [bevy_shaders/visibility_buffer_raster](#bevy_shaders/visibility_buffer_raster) +- [bevy_shaders/prepass_utils](#bevy_shaders/prepass_utils) +- [bevy_shaders/pbr_types](#bevy_shaders/pbr_types) +- [bevy_shaders/post_processing](#bevy_shaders/post_processing) +- [bevy_shaders/utils](#bevy_shaders/utils) +- [bevy_shaders/gpu_readback](#bevy_shaders/gpu_readback) +- [bevy_shaders/robust_contrast_adaptive_sharpening](#bevy_shaders/robust_contrast_adaptive_sharpening) +- [bevy_shaders/lines](#bevy_shaders/lines) +- [bevy_shaders/pbr_deferred_types](#bevy_shaders/pbr_deferred_types) +- [bevy_shaders/mesh_types](#bevy_shaders/mesh_types) +- [bevy_shaders/custom_material_2d](#bevy_shaders/custom_material_2d) +- [bevy_shaders/shader_defs](#bevy_shaders/shader_defs) +- [bevy_shaders/screenshot](#bevy_shaders/screenshot) +- [bevy_shaders/pbr_lighting](#bevy_shaders/pbr_lighting) +- [bevy_shaders/pbr](#bevy_shaders/pbr) +- [bevy_shaders/skybox](#bevy_shaders/skybox) +- [bevy_shaders/tonemapping_shared](#bevy_shaders/tonemapping_shared) +- [bevy_shaders/line_material](#bevy_shaders/line_material) +- [bevy_shaders/animate_shader](#bevy_shaders/animate_shader) +- [bevy_shaders/copy_deferred_lighting_id](#bevy_shaders/copy_deferred_lighting_id) +- [bevy_shaders/bloom](#bevy_shaders/bloom) +- [bevy_shaders/parallax_mapping](#bevy_shaders/parallax_mapping) +- 
[bevy_shaders/extended_material](#bevy_shaders/extended_material) +- [bevy_shaders/prepass_bindings](#bevy_shaders/prepass_bindings) +- [bevy_shaders/fog](#bevy_shaders/fog) +- [bevy_shaders/pbr_prepass](#bevy_shaders/pbr_prepass) +- [bevy_shaders/meshlet_mesh_material](#bevy_shaders/meshlet_mesh_material) +- [bevy_shaders/mesh2d_bindings](#bevy_shaders/mesh2d_bindings) +- [bevy_shaders/mesh_preprocess](#bevy_shaders/mesh_preprocess) +- [bevy_shaders/mesh2d_view_bindings](#bevy_shaders/mesh2d_view_bindings) +- [bevy_shaders/custom_vertex_attribute](#bevy_shaders/custom_vertex_attribute) +- [bevy_shaders/ssr](#bevy_shaders/ssr) +- [bevy_shaders/gtao_utils](#bevy_shaders/gtao_utils) +- [bevy_shaders/globals](#bevy_shaders/globals) +- [bevy_shaders/sprite_view_bindings](#bevy_shaders/sprite_view_bindings) +- [bevy_shaders/prepass_io](#bevy_shaders/prepass_io) +- [bevy_shaders/show_prepass](#bevy_shaders/show_prepass) +- [bevy_shaders/pbr_fragment](#bevy_shaders/pbr_fragment) +- [bevy_shaders/spatial_denoise](#bevy_shaders/spatial_denoise) +- [bevy_shaders/water_material](#bevy_shaders/water_material) +- [bevy_shaders/visibility_buffer_resolve](#bevy_shaders/visibility_buffer_resolve) +- [bevy_shaders/mesh2d_vertex_output](#bevy_shaders/mesh2d_vertex_output) +- [bevy_shaders/forward_io](#bevy_shaders/forward_io) +- [bevy_shaders/meshlet_bindings](#bevy_shaders/meshlet_bindings) +- [bevy_shaders/mesh_view_types](#bevy_shaders/mesh_view_types) +- [bevy_shaders/custom_material_import](#bevy_shaders/custom_material_import) +- [bevy_shaders/copy_material_depth](#bevy_shaders/copy_material_depth) +- [bevy_shaders/lut_bindings](#bevy_shaders/lut_bindings) +- [bevy_shaders/game_of_life](#bevy_shaders/game_of_life) +- [bevy_shaders/fallback_image_test](#bevy_shaders/fallback_image_test) +- [bevy_shaders/pbr_ambient](#bevy_shaders/pbr_ambient) +- [bevy_shaders/mesh](#bevy_shaders/mesh) +- [bevy_shaders/prepass](#bevy_shaders/prepass) +- [bevy_shaders/maths](#bevy_shaders/maths) +- [bevy_shaders/wireframe2d](#bevy_shaders/wireframe2d) +- [bevy_shaders/wireframe](#bevy_shaders/wireframe) +- [bevy_shaders/dof](#bevy_shaders/dof) +- [bevy_shaders/ui_vertex_output](#bevy_shaders/ui_vertex_output) +- [bevy_shaders/dummy_visibility_buffer_resolve](#bevy_shaders/dummy_visibility_buffer_resolve) +- [bevy_shaders/irradiance_volume_voxel_visualization](#bevy_shaders/irradiance_volume_voxel_visualization) +- [bevy_shaders/volumetric_fog](#bevy_shaders/volumetric_fog) +- [bevy_shaders/custom_phase_item](#bevy_shaders/custom_phase_item) +- [bevy_shaders/write_index_buffer](#bevy_shaders/write_index_buffer) +- [bevy_shaders/circle_shader](#bevy_shaders/circle_shader) +- [bevy_shaders/color_material](#bevy_shaders/color_material) +- [bevy_shaders/pbr_transmission](#bevy_shaders/pbr_transmission) +- [bevy_shaders/rgb9e5](#bevy_shaders/rgb9e5) +- [bevy_shaders/cull_meshlets](#bevy_shaders/cull_meshlets) +- [bevy_shaders/clustered_forward](#bevy_shaders/clustered_forward) +- [bevy_shaders/mesh2d](#bevy_shaders/mesh2d) +- [bevy_shaders/instance_index](#bevy_shaders/instance_index) +- [bevy_shaders/tonemapping](#bevy_shaders/tonemapping) +- [bevy_shaders/color_operations](#bevy_shaders/color_operations) +- [bevy_shaders/instancing](#bevy_shaders/instancing) +- [bevy_shaders/mesh_view_bindings](#bevy_shaders/mesh_view_bindings) +- [bevy_shaders/preprocess_depth](#bevy_shaders/preprocess_depth) +- [bevy_shaders/tonemapping_test_patterns](#bevy_shaders/tonemapping_test_patterns) +- 
[bevy_shaders/custom_gltf_2d](#bevy_shaders/custom_gltf_2d) +- [bevy_shaders/light_probe](#bevy_shaders/light_probe) +- [bevy_shaders/pbr_deferred_functions](#bevy_shaders/pbr_deferred_functions) +- [bevy_shaders/pbr_prepass_functions](#bevy_shaders/pbr_prepass_functions) +- [bevy_shaders/mesh_bindings](#bevy_shaders/mesh_bindings) +- [bevy_shaders/mesh2d_functions](#bevy_shaders/mesh2d_functions) +- [bevy_shaders/shadows](#bevy_shaders/shadows) +- [bevy_shaders/downsample_depth](#bevy_shaders/downsample_depth) +- [bevy_shaders/custom_material_screenspace_texture](#bevy_shaders/custom_material_screenspace_texture) +- [bevy_shaders/auto_exposure](#bevy_shaders/auto_exposure) +- [src/shader_utils/common](#src/shader_utils/common) +- [screenshots/11-10-23/19-40-28/screeenshot](#screenshots/11-10-23/19-40-28/screeenshot) +- [screenshots/11-10-23/19-08-42/screeenshot](#screenshots/11-10-23/19-08-42/screeenshot) +- [screenshots/08-11-23/22-29-33/screenshot](#screenshots/08-11-23/22-29-33/screenshot) +- [screenshots/08-11-23/22-29-35/screenshot](#screenshots/08-11-23/22-29-35/screenshot) +- [screenshots/08-11-23/22-29-32/screenshot](#screenshots/08-11-23/22-29-32/screenshot) +- [screenshots/14-10-23/12-28-54/screenshot](#screenshots/14-10-23/12-28-54/screenshot) +- [screenshots/14-10-23/12-28-55/screenshot](#screenshots/14-10-23/12-28-55/screenshot) +- [screenshots/27-01-24/21-40-42/screenshot](#screenshots/27-01-24/21-40-42/screenshot) +- [screenshots/17-11-23/06-01-11/screenshot](#screenshots/17-11-23/06-01-11/screenshot) +- [screenshots/17-11-23/16-49-11/screenshot](#screenshots/17-11-23/16-49-11/screenshot) +- [screenshots/22-10-23/19-39-14/screenshot](#screenshots/22-10-23/19-39-14/screenshot) +- [screenshots/21-01-24/18-50-41/screenshot](#screenshots/21-01-24/18-50-41/screenshot) +- [screenshots/02-11-23/07-48-05/screenshot](#screenshots/02-11-23/07-48-05/screenshot) +- [screenshots/16-11-23/21-23-45/screenshot](#screenshots/16-11-23/21-23-45/screenshot) +- [screenshots/16-11-23/21-25-04/screenshot](#screenshots/16-11-23/21-25-04/screenshot) +- [screenshots/09-06-24/20-43-57/screenshot](#screenshots/09-06-24/20-43-57/screenshot) +- [screenshots/09-10-23/07-22-16/screeenshot](#screenshots/09-10-23/07-22-16/screeenshot) +- [screenshots/07-10-23/11-16-48/screeenshot](#screenshots/07-10-23/11-16-48/screeenshot) +- [screenshots/07-10-23/aspect_ratio_correction_showed_with_circle/screeenshot](#screenshots/07-10-23/aspect_ratio_correction_showed_with_circle/screeenshot) +- [screenshots/07-10-23/12-03-36/screeenshot](#screenshots/07-10-23/12-03-36/screeenshot) +- [screenshots/07-10-23/21-43-49/screeenshot](#screenshots/07-10-23/21-43-49/screeenshot) +- [screenshots/24-10-23/21-14-59/screenshot](#screenshots/24-10-23/21-14-59/screenshot) +- [screenshots/24-01-24/06-51-53/screenshot](#screenshots/24-01-24/06-51-53/screenshot) +- [screenshots/24-01-24/06-36-04/screenshot](#screenshots/24-01-24/06-36-04/screenshot) +- [screenshots/24-01-24/06-37-20/screenshot](#screenshots/24-01-24/06-37-20/screenshot) +- [screenshots/24-01-24/06-25-45/screenshot](#screenshots/24-01-24/06-25-45/screenshot) +- [screenshots/24-01-24/06-36-05/screenshot](#screenshots/24-01-24/06-36-05/screenshot) +- [screenshots/24-01-24/06-36-03/screenshot](#screenshots/24-01-24/06-36-03/screenshot) +- [screenshots/27-11-23/20-44-03/screenshot](#screenshots/27-11-23/20-44-03/screenshot) + +### all_wgsl ```rust -// TODO use common view binding -#import bevy_render::view::View - -@group(0) @binding(0) var view: View; - - -struct 
LineGizmoUniform { - line_width: f32, - depth_bias: f32, -#ifdef SIXTEEN_BYTE_ALIGNMENT - // WebGL2 structs must be 16 byte aligned. - _padding: vec2, -#endif -} +#import bevy_pbr::mesh_vertex_output VertexOutput +#import bevy_pbr::mesh_view_bindings view +#import bevy_pbr::pbr_types STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT +#import bevy_core_pipeline::tonemapping tone_mapping +#import bevy_pbr::pbr_functions as fns -@group(1) @binding(0) var line_gizmo: LineGizmoUniform; +@group(1) @binding(0) var my_array_texture: texture_2d_array; +@group(1) @binding(1) var my_array_texture_sampler: sampler; -struct VertexInput { - @location(0) position_a: vec3, - @location(1) position_b: vec3, - @location(2) color_a: vec4, - @location(3) color_b: vec4, - @builtin(vertex_index) index: u32, -}; +@fragment +fn fragment( + @builtin(front_facing) is_front: bool, + mesh: VertexOutput, +) -> @location(0) vec4 { + let layer = i32(mesh.world_position.x) & 0x3; -struct VertexOutput { - @builtin(position) clip_position: vec4, - @location(0) color: vec4, - @location(1) uv: f32, -}; + // Prepare a 'processed' StandardMaterial by sampling all textures to resolve + // the material members + var pbr_input: fns::PbrInput = fns::pbr_input_new(); -const EPSILON: f32 = 4.88e-04; + pbr_input.material.base_color = texture_sample(my_array_texture, my_array_texture_sampler, mesh.uv, layer); +#ifdef VERTEX_COLORS + pbr_input.material.base_color = pbr_input.material.base_color * mesh.color; +#endif -@vertex -fn vertex(vertex: VertexInput) -> VertexOutput { - var positions = array, 6>( - vec2(-0.5, 0.), - vec2(-0.5, 1.), - vec2(0.5, 1.), - vec2(-0.5, 0.), - vec2(0.5, 1.), - vec2(0.5, 0.) + pbr_input.frag_coord = mesh.position; + pbr_input.world_position = mesh.world_position; + pbr_input.world_normal = fns::prepare_world_normal( + mesh.world_normal, + (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, + is_front, ); - let position = positions[vertex.index]; - - // algorithm based on https://wwwtyro.net/2019/11/18/instanced-lines.html - var clip_a = view.view_proj * vec4(vertex.position_a, 1.); - var clip_b = view.view_proj * vec4(vertex.position_b, 1.); - - // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. 
- clip_a = clip_near_plane(clip_a, clip_b); - clip_b = clip_near_plane(clip_b, clip_a); - let clip = mix(clip_a, clip_b, position.y); - - let resolution = view.viewport.zw; - let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); - let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); - - let y_basis = normalize(screen_b - screen_a); - let x_basis = vec2(-y_basis.y, y_basis.x); - - var color = mix(vertex.color_a, vertex.color_b, position.y); - var line_width = line_gizmo.line_width; - var alpha = 1.; - - var uv: f32; -#ifdef PERSPECTIVE - line_width /= clip.w; + pbr_input.is_orthographic = view.projection[3].w == 1.0; - // get height of near clipping plane in world space - let pos0 = view.inverse_projection * vec4(0, -1, 0, 1); // Bottom of the screen - let pos1 = view.inverse_projection * vec4(0, 1, 0, 1); // Top of the screen - let near_clipping_plane_height = length(pos0.xyz - pos1.xyz); + pbr_input.N = fns::apply_normal_mapping( + pbr_input.material.flags, + mesh.world_normal, +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + mesh.world_tangent, +#endif +#endif + mesh.uv, + view.mip_bias, + ); + pbr_input.V = fns::calculate_view(mesh.world_position, pbr_input.is_orthographic); - // We can't use vertex.position_X because we may have changed the clip positions with clip_near_plane - let position_a = view.inverse_view_proj * clip_a; - let position_b = view.inverse_view_proj * clip_b; - let world_distance = length(position_a.xyz - position_b.xyz); + return tone_mapping(fns::pbr(pbr_input), view.color_grading); +} +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_bindings +#import bevy_pbr::forward_io::VertexOutput - // Offset to compensate for moved clip positions. If removed dots on lines will slide when position a is ofscreen. - let clipped_offset = length(position_a.xyz - vertex.position_a); +@group(1) @binding(0) var test_texture_1d: texture_1d; +@group(1) @binding(1) var test_texture_1d_sampler: sampler; - uv = (clipped_offset + position.y * world_distance) * resolution.y / near_clipping_plane_height / line_gizmo.line_width; -#else - // Get the distance of b to the camera along camera axes - let camera_b = view.inverse_projection * clip_b; +@group(1) @binding(2) var test_texture_2d: texture_2d; +@group(1) @binding(3) var test_texture_2d_sampler: sampler; - // This differentiates between orthographic and perspective cameras. - // For orthographic cameras no depth adaptment (depth_adaptment = 1) is needed. - var depth_adaptment: f32; - if (clip_b.w == 1.0) { - depth_adaptment = 1.0; - } - else { - depth_adaptment = -camera_b.z; - } - uv = position.y * depth_adaptment * length(screen_b - screen_a) / line_gizmo.line_width; -#endif +@group(1) @binding(4) var test_texture_2d_array: texture_2d_array; +@group(1) @binding(5) var test_texture_2d_array_sampler: sampler; - // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing - if line_width > 0.0 && line_width < 1. { - color.a *= line_width; - line_width = 1.; - } +@group(1) @binding(6) var test_texture_cube: texture_cube; +@group(1) @binding(7) var test_texture_cube_sampler: sampler; - let x_offset = line_width * position.x * x_basis; - let screen = mix(screen_a, screen_b, position.y) + x_offset; +@group(1) @binding(8) var test_texture_cube_array: texture_cube_array; +@group(1) @binding(9) var test_texture_cube_array_sampler: sampler; - var depth: f32; - if line_gizmo.depth_bias >= 0. { - depth = clip.z * (1. 
- line_gizmo.depth_bias); - } else { - // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w - // and when equal to 0.0, it is exactly equal to depth. - // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 - // clip.w represents the near plane in homogeneous clip space in bevy, having a depth - // of this value means nothing can be in front of this - // The reason this uses an exponential function is that it makes it much easier for the - // user to chose a value that is convenient for them - depth = clip.z * exp2(-line_gizmo.depth_bias * log2(clip.w / clip.z - EPSILON)); - } +@group(1) @binding(10) var test_texture_3d: texture_3d; +@group(1) @binding(11) var test_texture_3d_sampler: sampler; - var clip_position = vec4(clip.w * ((2. * screen) / resolution - 1.), depth, clip.w); +@fragment +fn fragment(in: VertexOutput) {} +#import bevy_pbr::mesh_view_bindings view +#import bevy_pbr::mesh_vertex_output VertexOutput +#import bevy_pbr::utils coords_to_viewport_uv - return VertexOutput(clip_position, color, uv); -} +@group(1) @binding(0) var texture: texture_2d; +@group(1) @binding(1) var texture_sampler: sampler; -fn clip_near_plane(a: vec4, b: vec4) -> vec4 { - // Move a if a is behind the near plane and b is in front. - if a.z > a.w && b.z <= b.w { - // Interpolate a towards b until it's at the near plane. - let distance_a = a.z - a.w; - let distance_b = b.z - b.w; - // Add an epsilon to the interpolator to ensure that the point is - // not just behind the clip plane due to floating-point imprecision. - let t = distance_a / (distance_a - distance_b) + EPSILON; - return mix(a, b, t); - } - return a; +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + let viewport_uv = coords_to_viewport_uv(mesh.position.xy, view.viewport); + let color = texture_sample(texture, texture_sampler, viewport_uv); + return color; } +#import bevy_pbr::forward_io::VertexOutput -struct FragmentInput { - @builtin(position) position: vec4, - @location(0) color: vec4, - @location(1) uv: f32, +struct CustomMaterial { + color: vec4, }; -struct FragmentOutput { - @location(0) color: vec4, -}; +@group(1) @binding(0) var material: CustomMaterial; +@group(1) @binding(1) var base_color_texture: texture_2d; +@group(1) @binding(2) var base_color_sampler: sampler; @fragment -fn fragment_solid(in: FragmentInput) -> FragmentOutput { - return FragmentOutput(in.color); +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return material.color * texture_sample(base_color_texture, base_color_sampler, mesh.uv); } -@fragment -fn fragment_dotted(in: FragmentInput) -> FragmentOutput { - var alpha: f32; -#ifdef PERSPECTIVE - alpha = 1 - floor(in.uv % 2.0); -#else - alpha = 1 - floor((in.uv * in.position.w) % 2.0); +#import bevy_pbr::mesh_view_bindings +#import bevy_pbr::mesh_bindings +#import bevy_pbr::mesh_vertex_output VertexOutput +#import bevy_pbr::utils PI + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping tone_mapping #endif + +// Sweep across hues on y axis with value from 0.0 to +15EV across x axis +// quantized into 24 steps for both axis. 
+fn color_sweep(uv: vec2) -> vec3 { + var uv = uv; + let steps = 24.0; + uv.y = uv.y * (1.0 + 1.0 / steps); + let ratio = 2.0; - return FragmentOutput(vec4(in.color.xyz, in.color.w * alpha)); + let h = PI * 2.0 * floor(1.0 + steps * uv.y) / steps; + let L = floor(uv.x * steps * ratio) / (steps * ratio) - 0.5; + + var color = vec3(0.0); + if uv.y < 1.0 { + color = cos(h + vec3(0.0, 1.0, 2.0) * PI * 2.0 / 3.0); + let max_rgb = max(color.r, max(color.g, color.b)); + let min_rgb = min(color.r, min(color.g, color.b)); + color = exp(15.0 * L) * (color - min_rgb) / (max_rgb - min_rgb); + } else { + color = vec3(exp(15.0 * L)); + } + return color; } -``` - -### crates/bevy_gizmos/src/line_joints - -```rust -#import bevy_render::view::View - -@group(0) @binding(0) var view: View; +fn hsv_to_srgb(c: vec3) -> vec3 { + let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + let p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} +// Generates a continuous sRGB sweep. +fn continuous_hue(uv: vec2) -> vec3 { + return hsv_to_srgb(vec3(uv.x, 1.0, 1.0)) * max(0.0, exp2(uv.y * 9.0) - 1.0); +} -struct LineGizmoUniform { - line_width: f32, - depth_bias: f32, - resolution: u32, -#ifdef SIXTEEN_BYTE_ALIGNMENT - // WebGL2 structs must be 16 byte aligned. - _padding: f32, +@fragment +fn fragment( + in: VertexOutput, +) -> @location(0) vec4 { + var uv = in.uv; + var out = vec3(0.0); + if uv.y > 0.5 { + uv.y = 1.0 - uv.y; + out = color_sweep(vec2(uv.x, uv.y * 2.0)); + } else { + out = continuous_hue(vec2(uv.y * 2.0, uv.x)); + } + var color = vec4(out, 1.0); +#ifdef TONEMAP_IN_SHADER + color = tone_mapping(color, bevy_pbr::mesh_view_bindings::view.color_grading); #endif + return color; } +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip -@group(1) @binding(0) var joints_gizmo: LineGizmoUniform; +struct CustomMaterial { + color: vec4, +}; +@group(1) @binding(0) var material: CustomMaterial; -struct VertexInput { - @location(0) position_a: vec3, - @location(1) position_b: vec3, - @location(2) position_c: vec3, - @location(3) color: vec4, - @builtin(vertex_index) index: u32, +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + @location(1) blend_color: vec4, }; struct VertexOutput { @builtin(position) clip_position: vec4, - @location(0) color: vec4, + @location(0) blend_color: vec4, }; -const EPSILON: f32 = 4.88e-04; - @vertex -fn vertex_bevel(vertex: VertexInput) -> VertexOutput { - var positions = array, 3>( - vec2(0, 0), - vec2(0, 0.5), - vec2(0.5, 0), +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + out.clip_position = mesh_position_local_to_clip( + get_model_matrix(vertex.instance_index), + vec4(vertex.position, 1.0), ); - var position = positions[vertex.index]; + out.blend_color = vertex.blend_color; + return out; +} - var clip_a = view.view_proj * vec4(vertex.position_a, 1.); - var clip_b = view.view_proj * vec4(vertex.position_b, 1.); - var clip_c = view.view_proj * vec4(vertex.position_c, 1.); +struct FragmentInput { + @location(0) blend_color: vec4, +}; - // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. 
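// Illustrative note: for an endpoint behind the near plane (z > w in this shader's convention)
// the perspective divide would be unstable and can mirror the projected point across the
// screen, so each segment endpoint is first clipped against the near plane below.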
- clip_a = clip_near_plane(clip_a, clip_c); - clip_b = clip_near_plane(clip_b, clip_a); - clip_c = clip_near_plane(clip_c, clip_b); - clip_a = clip_near_plane(clip_a, clip_c); +@fragment +fn fragment(input: FragmentInput) -> @location(0) vec4 { + return material.color * input.blend_color; +} +// This shader computes the chromatic aberration effect - let resolution = view.viewport.zw; - let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); - let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); - let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); +#import bevy_pbr::utils - var color = vertex.color; - var line_width = joints_gizmo.line_width; +// Since post processing is a fullscreen effect, we use the fullscreen vertex shader provided by bevy. +// This will import a vertex shader that renders a single fullscreen triangle. +// +// A fullscreen triangle is a single triangle that covers the entire screen. +// The box in the top left in that diagram is the screen. The 4 x are the corner of the screen +// +// Y axis +// 1 | x-----x...... +// 0 | | s | . ´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | :´ +// +--------------- X axis +// -1 0 1 2 3 +// +// As you can see, the triangle ends up bigger than the screen. +// +// You don't need to worry about this too much since bevy will compute the correct UVs for you. +#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput -#ifdef PERSPECTIVE - line_width /= clip_b.w; +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var texture_sampler: sampler; +struct PostProcessSettings { + intensity: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: vec3 #endif - - // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing - if line_width > 0.0 && line_width < 1. { - color.a *= line_width; - line_width = 1.; - } - - let ab = normalize(screen_b - screen_a); - let cb = normalize(screen_b - screen_c); - let ab_norm = vec2(-ab.y, ab.x); - let cb_norm = vec2(cb.y, -cb.x); - let tangent = normalize(ab - cb); - let normal = vec2(-tangent.y, tangent.x); - let sigma = sign(dot(ab + cb, normal)); - - var p0 = line_width * sigma * ab_norm; - var p1 = line_width * sigma * cb_norm; - - let screen = screen_b + position.x * p0 + position.y * p1; - - let depth = depth(clip_b); - - var clip_position = vec4(clip_b.w * ((2. 
* screen) / resolution - 1.), depth, clip_b.w); - return VertexOutput(clip_position, color); } +@group(0) @binding(2) var settings: PostProcessSettings; -@vertex -fn vertex_miter(vertex: VertexInput) -> VertexOutput { - var positions = array, 6>( - vec3(0, 0, 0), - vec3(0.5, 0, 0), - vec3(0, 0.5, 0), - vec3(0, 0, 0), - vec3(0, 0.5, 0), - vec3(0, 0, 0.5), +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Chromatic aberration strength + let offset_strength = settings.intensity; + + // Sample each color channel with an arbitrary shift + return vec4( + texture_sample(screen_texture, texture_sampler, in.uv + vec2(offset_strength, -offset_strength)).r, + texture_sample(screen_texture, texture_sampler, in.uv + vec2(-offset_strength, 0.0)).g, + texture_sample(screen_texture, texture_sampler, in.uv + vec2(0.0, offset_strength)).b, + 1.0 ); - var position = positions[vertex.index]; - - var clip_a = view.view_proj * vec4(vertex.position_a, 1.); - var clip_b = view.view_proj * vec4(vertex.position_b, 1.); - var clip_c = view.view_proj * vec4(vertex.position_c, 1.); +} - // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. - clip_a = clip_near_plane(clip_a, clip_c); - clip_b = clip_near_plane(clip_b, clip_a); - clip_c = clip_near_plane(clip_c, clip_b); - clip_a = clip_near_plane(clip_a, clip_c); +#import bevy_pbr::forward_io::VertexOutput - let resolution = view.viewport.zw; - let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); - let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); - let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); +struct CustomMaterial { + color: vec4, +}; - var color = vertex.color; - var line_width = joints_gizmo.line_width; +@group(1) @binding(0) var material: CustomMaterial; -#ifdef PERSPECTIVE - line_width /= clip_b.w; +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifdef IS_RED + return vec4(1.0, 0.0, 0.0, 1.0); +#else + return material.color; #endif +} +@group(0) @binding(0) var texture: texture_storage_2d; - // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing - if line_width > 0.0 && line_width < 1. { - color.a *= line_width; - line_width = 1.; - } +fn hash(value: u32) -> u32 { + var state = value; + state = state ^ 2747636419u; + state = state * 2654435769u; + state = state ^ state >> 16u; + state = state * 2654435769u; + state = state ^ state >> 16u; + state = state * 2654435769u; + return state; +} - let ab = normalize(screen_b - screen_a); - let cb = normalize(screen_b - screen_c); - let ab_norm = vec2(-ab.y, ab.x); - let cb_norm = vec2(cb.y, -cb.x); - let tangent = normalize(ab - cb); - let normal = vec2(-tangent.y, tangent.x); - let sigma = sign(dot(ab + cb, normal)); +fn random_float(value: u32) -> f32 { + return f32(hash(value)) / 4294967295.0; +} - var p0 = line_width * sigma * ab_norm; - var p1 = line_width * sigma * normal / dot(normal, ab_norm); - var p2 = line_width * sigma * cb_norm; - - var screen = screen_b + position.x * p0 + position.y * p1 + position.z * p2; +@compute @workgroup_size(8, 8, 1) +fn init(@builtin(global_invocation_id) invocation_id: vec3, @builtin(num_workgroups) num_workgroups: vec3) { + let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); - var depth = depth(clip_b); + let random_number = random_float(invocation_id.y * num_workgroups.x + invocation_id.x); + let alive = random_number > 0.9; + let color = vec4(f32(alive)); - var clip_position = vec4(clip_b.w * ((2. 
* screen) / resolution - 1.), depth, clip_b.w); - return VertexOutput(clip_position, color); + texture_store(texture, location, color); } -@vertex -fn vertex_round(vertex: VertexInput) -> VertexOutput { - var clip_a = view.view_proj * vec4(vertex.position_a, 1.); - var clip_b = view.view_proj * vec4(vertex.position_b, 1.); - var clip_c = view.view_proj * vec4(vertex.position_c, 1.); - - // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. - clip_a = clip_near_plane(clip_a, clip_c); - clip_b = clip_near_plane(clip_b, clip_a); - clip_c = clip_near_plane(clip_c, clip_b); - clip_a = clip_near_plane(clip_a, clip_c); +fn is_alive(location: vec2, offset_x: i32, offset_y: i32) -> i32 { + let value: vec4 = texture_load(texture, location + vec2(offset_x, offset_y)); + return i32(value.x); +} - let resolution = view.viewport.zw; - let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); - let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); - let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); +fn count_alive(location: vec2) -> i32 { + return is_alive(location, -1, -1) + + is_alive(location, -1, 0) + + is_alive(location, -1, 1) + + is_alive(location, 0, -1) + + is_alive(location, 0, 1) + + is_alive(location, 1, -1) + + is_alive(location, 1, 0) + + is_alive(location, 1, 1); +} - var color = vertex.color; - var line_width = joints_gizmo.line_width; +@compute @workgroup_size(8, 8, 1) +fn update(@builtin(global_invocation_id) invocation_id: vec3) { + let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); -#ifdef PERSPECTIVE - line_width /= clip_b.w; -#endif + let n_alive = count_alive(location); - // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing - if line_width > 0.0 && line_width < 1. { - color.a *= line_width; - line_width = 1.; + var alive: bool; + if (n_alive == 3) { + alive = true; + } else if (n_alive == 2) { + let currently_alive = is_alive(location, 0, 0); + alive = bool(currently_alive); + } else { + alive = false; } + let color = vec4(f32(alive)); - let ab = normalize(screen_b - screen_a); - let cb = normalize(screen_b - screen_c); - let ab_norm = vec2(-ab.y, ab.x); - let cb_norm = vec2(cb.y, -cb.x); + storage_barrier(); - // We render `joints_gizmo.resolution`triangles. The vertices in each triangle are ordered as follows: - // - 0: The 'center' vertex at `screen_b`. - // - 1: The vertex closer to the ab line. - // - 2: The vertex closer to the cb line. - var in_triangle_index = f32(vertex.index) % 3.0; - var tri_index = floor(f32(vertex.index) / 3.0); - var radius = sign(in_triangle_index) * 0.5 * line_width; - var theta = acos(dot(ab_norm, cb_norm)); - let sigma = sign(dot(ab_norm, cb)); - var angle = theta * (tri_index + in_triangle_index - 1) / f32(joints_gizmo.resolution); - var position_x = sigma * radius * cos(angle); - var position_y = radius * sin(angle); + texture_store(texture, location, color); +}#import bevy_pbr::forward_io::VertexOutput - var screen = screen_b + position_x * ab_norm + position_y * ab; +#ifdef CUBEMAP_ARRAY +@group(1) @binding(0) var base_color_texture: texture_cube_array; +#else +@group(1) @binding(0) var base_color_texture: texture_cube; +#endif - var depth = depth(clip_b); +@group(1) @binding(1) var base_color_sampler: sampler; - var clip_position = vec4(clip_b.w * ((2. 
* screen) / resolution - 1.), depth, clip_b.w); - return VertexOutput(clip_position, color); +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + let fragment_position_view_lh = mesh.world_position.xyz * vec3(1.0, 1.0, -1.0); + return texture_sample( + base_color_texture, + base_color_sampler, + fragment_position_view_lh + ); } +#import bevy_pbr::forward_io::VertexOutput -fn clip_near_plane(a: vec4, b: vec4) -> vec4 { - // Move a if a is behind the near plane and b is in front. - if a.z > a.w && b.z <= b.w { - // Interpolate a towards b until it's at the near plane. - let distance_a = a.z - a.w; - let distance_b = b.z - b.w; - // Add an epsilon to the interpolator to ensure that the point is - // not just behind the clip plane due to floating-point imprecision. - let t = distance_a / (distance_a - distance_b) + EPSILON; - return mix(a, b, t); - } - return a; -} +struct LineMaterial { + color: vec4, +}; -fn depth(clip: vec4) -> f32 { - var depth: f32; - if joints_gizmo.depth_bias >= 0. { - depth = clip.z * (1. - joints_gizmo.depth_bias); - } else { - // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w - // and when equal to 0.0, it is exactly equal to depth. - // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 - // clip.w represents the near plane in homogeneous clip space in bevy, having a depth - // of this value means nothing can be in front of this - // The reason this uses an exponential function is that it makes it much easier for the - // user to chose a value that is convenient for them - depth = clip.z * exp2(-joints_gizmo.depth_bias * log2(clip.w / clip.z - EPSILON)); - } - return depth; +@group(1) @binding(0) var material: LineMaterial; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return material.color; } +#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip +#import bevy_pbr::mesh_bindings mesh -struct FragmentInput { - @location(0) color: vec4, +struct Vertex { + @location(0) position: vec3, + @location(1) normal: vec3, + @location(2) uv: vec2, + + @location(3) i_pos_scale: vec4, + @location(4) i_color: vec4, }; -struct FragmentOutput { +struct VertexOutput { + @builtin(position) clip_position: vec4, @location(0) color: vec4, }; +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + let position = vertex.position * vertex.i_pos_scale.w + vertex.i_pos_scale.xyz; + var out: VertexOutput; + // NOTE: Passing 0 as the instance_index to get_model_matrix() is a hack + // for this example as the instance_index builtin would map to the wrong + // index in the Mesh array. This index could be passed in via another + // uniform instead but it's unnecessary for the example. 
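// Illustrative note: every instance shares the model matrix fetched with index 0u below, so
// per-instance placement comes entirely from i_pos_scale (translation in xyz, uniform scale
// in w) already applied to the vertex position above.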
+ out.clip_position = mesh_position_local_to_clip( + get_model_matrix(0u), + vec4(position, 1.0) + ); + out.color = vertex.i_color; + return out; +} + @fragment -fn fragment(in: FragmentInput) -> FragmentOutput { - // return FragmentOutput(vec4(1, 1, 1, 1)); - return FragmentOutput(in.color); +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return in.color; } -``` +#import bevy_pbr::forward_io::VertexOutput -### crates/bevy_sprite/src/render/sprite +@group(1) @binding(0) var textures: binding_array>; +@group(1) @binding(1) var nearest_sampler: sampler; +// We can also have array of samplers +// var samplers: binding_array; -```rust -#ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping -#endif +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + // Select the texture to sample from using non-uniform uv coordinates + let coords = clamp(vec2(mesh.uv * 4.0), vec2(0u), vec2(3u)); + let index = coords.y * 4u + coords.x; + let inner_uv = fract(mesh.uv * 4.0); + return texture_sample(textures[index], nearest_sampler, inner_uv); +} +#import bevy_pbr::mesh_types +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::prepass_utils +#import bevy_pbr::mesh_vertex_output VertexOutput -#import bevy_render::{ - maths::affine3_to_square, - view::View, +struct ShowPrepassSettings { + show_depth: u32, + show_normals: u32, + show_motion_vectors: u32, + padding_1: u32, + padding_2: u32, } +@group(1) @binding(0) var settings: ShowPrepassSettings; -@group(0) @binding(0) var view: View; +@fragment +fn fragment( +#ifdef MULTISAMPLED + @builtin(sample_index) sample_index: u32, +#endif + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifndef MULTISAMPLED + let sample_index = 0u; +#endif + if settings.show_depth == 1u { + let depth = bevy_pbr::prepass_utils::prepass_depth(mesh.position, sample_index); + return vec4(depth, depth, depth, 1.0); + } else if settings.show_normals == 1u { + let normal = bevy_pbr::prepass_utils::prepass_normal(mesh.position, sample_index); + return vec4(normal, 1.0); + } else if settings.show_motion_vectors == 1u { + let motion_vector = bevy_pbr::prepass_utils::prepass_motion_vector(mesh.position, sample_index); + return vec4(motion_vector / globals.delta_time, 0.0, 1.0); + } -struct VertexInput { - @builtin(vertex_index) index: u32, - // NOTE: Instance-rate vertex buffer members prefixed with i_ - // NOTE: i_model_transpose_colN are the 3 columns of a 3x4 matrix that is the transpose of the - // affine 4x3 model matrix. - @location(0) i_model_transpose_col0: vec4, - @location(1) i_model_transpose_col1: vec4, - @location(2) i_model_transpose_col2: vec4, - @location(3) i_color: vec4, - @location(4) i_uv_offset_scale: vec4, + return vec4(0.0); } +// If using this WGSL snippet as an #import, the following should be in scope: +// +// - the `morph_weights` uniform of type `MorphWeights` +// - the `morph_targets` 3d texture +// +// They are defined in `mesh_types.wgsl` and `mesh_bindings.wgsl`. 
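// Illustrative usage sketch (assumed caller, mirroring the morphing vertex shaders later in
// this diff): for i in 0..layer_count(), skip zero weights and accumulate
//   vertex.position += weight_at(i) * morph(vertex.index, position_offset, i);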
-struct VertexOutput { - @builtin(position) clip_position: vec4, - @location(0) uv: vec2, - @location(1) @interpolate(flat) color: vec4, -}; +#define_import_path bevy_pbr::morph -@vertex -fn vertex(in: VertexInput) -> VertexOutput { - var out: VertexOutput; +#ifdef MORPH_TARGETS - let vertex_position = vec3( - f32(in.index & 0x1u), - f32((in.index & 0x2u) >> 1u), - 0.0 - ); +#import bevy_pbr::mesh_types MorphWeights - out.clip_position = view.view_proj * affine3_to_square(mat3x4( - in.i_model_transpose_col0, - in.i_model_transpose_col1, - in.i_model_transpose_col2, - )) * vec4(vertex_position, 1.0); - out.uv = vec2(vertex_position.xy) * in.i_uv_offset_scale.zw + in.i_uv_offset_scale.xy; - out.color = in.i_color; +#ifdef MESH_BINDGROUP_1 - return out; -} +@group(1) @binding(2) var morph_weights: MorphWeights; +@group(1) @binding(3) var morph_targets: texture_3d; -@group(1) @binding(0) var sprite_texture: texture_2d; -@group(1) @binding(1) var sprite_sampler: sampler; +#else -@fragment -fn fragment(in: VertexOutput) -> @location(0) vec4 { - var color = in.color * textureSample(sprite_texture, sprite_sampler, in.uv); +@group(2) @binding(2) var morph_weights: MorphWeights; +@group(2) @binding(3) var morph_targets: texture_3d; -#ifdef TONEMAP_IN_SHADER - color = tonemapping::tone_mapping(color, view.color_grading); #endif - return color; -} -``` +// NOTE: Those are the "hardcoded" values found in `MorphAttributes` struct +// in crates/bevy_render/src/mesh/morph/visitors.rs +// In an ideal world, the offsets are established dynamically and passed as #defines +// to the shader, but it's out of scope for the initial implementation of morph targets. +const position_offset: u32 = 0u; +const normal_offset: u32 = 3u; +const tangent_offset: u32 = 6u; +const total_component_count: u32 = 9u; + +fn layer_count() -> u32 { + let dimensions = texture_dimensions(morph_targets); + return u32(dimensions.z); +} +fn component_texture_coord(vertex_index: u32, component_offset: u32) -> vec2 { + let width = u32(texture_dimensions(morph_targets).x); + let component_index = total_component_count * vertex_index + component_offset; + return vec2(component_index % width, component_index / width); +} +fn weight_at(weight_index: u32) -> f32 { + let i = weight_index; + return morph_weights.weights[i / 4u][i % 4u]; +} +fn morph_pixel(vertex: u32, component: u32, weight: u32) -> f32 { + let coord = component_texture_coord(vertex, component); + // Due to https://gpuweb.github.io/gpuweb/wgsl/#texel-formats + // While the texture stores a f32, the textureLoad returns a vec4<>, where + // only the first component is set. 
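// Worked example (illustrative, assumed sizes): for vertex = 2u and component = normal_offset
// (3u), component_index = 9u * 2u + 3u = 21u; with a hypothetical texture width of 16 texels,
// coord is (21 % 16, 21 / 16) = (5, 1), and `weight` selects the layer of the 3d texture.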
+ return texture_load(morph_targets, vec3(coord, weight), 0).r; +} +fn morph(vertex_index: u32, component_offset: u32, weight_index: u32) -> vec3 { + return vec3( + morph_pixel(vertex_index, component_offset, weight_index), + morph_pixel(vertex_index, component_offset + 1u, weight_index), + morph_pixel(vertex_index, component_offset + 2u, weight_index), + ); +} -### crates/bevy_sprite/src/mesh2d/mesh2d_functions +#endif // MORPH_TARGETS#define_import_path bevy_pbr::mesh_functions -```rust -#define_import_path bevy_sprite::mesh2d_functions +#import bevy_pbr::mesh_view_bindings view +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_types MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT +#import bevy_render::instance_index get_instance_index +#import bevy_render::maths affine_to_square, mat2x4_f32_to_mat3x3_unpack -#import bevy_sprite::{ - mesh2d_view_bindings::view, - mesh2d_bindings::mesh, +fn get_model_matrix(instance_index: u32) -> mat4x4 { + return affine_to_square(mesh[get_instance_index(instance_index)].model); } -#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} -fn get_model_matrix(instance_index: u32) -> mat4x4 { - return affine3_to_square(mesh[instance_index].model); +fn get_previous_model_matrix(instance_index: u32) -> mat4x4 { + return affine_to_square(mesh[get_instance_index(instance_index)].previous_model); } -fn mesh2d_position_local_to_world(model: mat4x4, vertex_position: vec4) -> vec4 { +fn mesh_position_local_to_world(model: mat4x4, vertex_position: vec4) -> vec4 { return model * vertex_position; } -fn mesh2d_position_world_to_clip(world_position: vec4) -> vec4 { +fn mesh_position_world_to_clip(world_position: vec4) -> vec4 { return view.view_proj * world_position; } // NOTE: The intermediate world_position assignment is important // for precision purposes when using the 'equals' depth comparison // function. -fn mesh2d_position_local_to_clip(model: mat4x4, vertex_position: vec4) -> vec4 { - let world_position = mesh2d_position_local_to_world(model, vertex_position); - return mesh2d_position_world_to_clip(world_position); +fn mesh_position_local_to_clip(model: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh_position_local_to_world(model, vertex_position); + return mesh_position_world_to_clip(world_position); } -fn mesh2d_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { - return mat2x4_f32_to_mat3x3_unpack( - mesh[instance_index].inverse_transpose_model_a, - mesh[instance_index].inverse_transpose_model_b, - ) * vertex_normal; +fn mesh_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { + // NOTE: The mikktspace method of normal mapping requires that the world normal is + // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents + // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. Do not change this code + // unless you really know what you are doing. 
+ // http://www.mikktspace.com/ + return normalize( + mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].inverse_transpose_model_a, + mesh[instance_index].inverse_transpose_model_b, + ) * vertex_normal + ); +} + +// Calculates the sign of the determinant of the 3x3 model matrix based on a +// mesh flag +fn sign_determinant_model_3x3m(instance_index: u32) -> f32 { + // bool(u32) is false if 0u else true + // f32(bool) is 1.0 if true else 0.0 + // * 2.0 - 1.0 remaps 0.0 or 1.0 to -1.0 or 1.0 respectively + return f32(bool(mesh[instance_index].flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0; } -fn mesh2d_tangent_local_to_world(model: mat4x4, vertex_tangent: vec4) -> vec4 { +fn mesh_tangent_local_to_world(model: mat4x4, vertex_tangent: vec4, instance_index: u32) -> vec4 { + // NOTE: The mikktspace method of normal mapping requires that the world tangent is + // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents + // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ return vec4( - mat3x3( - model[0].xyz, - model[1].xyz, - model[2].xyz - ) * vertex_tangent.xyz, - vertex_tangent.w + normalize( + mat3x3( + model[0].xyz, + model[1].xyz, + model[2].xyz + ) * vertex_tangent.xyz + ), + // NOTE: Multiplying by the sign of the determinant of the 3x3 model matrix accounts for + // situations such as negative scaling. + vertex_tangent.w * sign_determinant_model_3x3m(instance_index) ); } +#define_import_path bevy_pbr::mesh_types -``` - -### crates/bevy_sprite/src/mesh2d/mesh2d - -```rust -#import bevy_sprite::{ - mesh2d_functions as mesh_functions, - mesh2d_vertex_output::VertexOutput, - mesh2d_view_bindings::view, -} +struct Mesh { + // Affine 4x3 matrices transposed to 3x4 + // Use bevy_render::maths::affine_to_square to unpack + model: mat3x4, + previous_model: mat3x4, + // 3x3 matrix packed in mat2x4 and f32 as: + // [0].xyz, [1].x, + // [1].yz, [2].xy + // [2].z + // Use bevy_pbr::mesh_functions::mat2x4_f32_to_mat3x3_unpack to unpack + inverse_transpose_model_a: mat2x4, + inverse_transpose_model_b: f32, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
+ flags: u32, +}; -#ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping +#ifdef SKINNED +struct SkinnedMesh { + data: array, 256u>, +}; #endif -struct Vertex { - @builtin(instance_index) instance_index: u32, -#ifdef VERTEX_POSITIONS - @location(0) position: vec3, -#endif -#ifdef VERTEX_NORMALS - @location(1) normal: vec3, -#endif -#ifdef VERTEX_UVS - @location(2) uv: vec2, -#endif -#ifdef VERTEX_TANGENTS - @location(3) tangent: vec4, -#endif -#ifdef VERTEX_COLORS - @location(4) color: vec4, -#endif +#ifdef MORPH_TARGETS +struct MorphWeights { + weights: array, 16u>, // 16 = 64 / 4 (64 = MAX_MORPH_WEIGHTS) }; - -@vertex -fn vertex(vertex: Vertex) -> VertexOutput { - var out: VertexOutput; -#ifdef VERTEX_UVS - out.uv = vertex.uv; #endif -#ifdef VERTEX_POSITIONS - var model = mesh_functions::get_model_matrix(vertex.instance_index); - out.world_position = mesh_functions::mesh2d_position_local_to_world( - model, - vec4(vertex.position, 1.0) - ); - out.position = mesh_functions::mesh2d_position_world_to_clip(out.world_position); -#endif +const MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 1u; +// 2^31 - if the flag is set, the sign is positive, else it is negative +const MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT: u32 = 2147483648u; +#define_import_path bevy_pbr::skinning -#ifdef VERTEX_NORMALS - out.world_normal = mesh_functions::mesh2d_normal_local_to_world(vertex.normal, vertex.instance_index); -#endif +#import bevy_pbr::mesh_types SkinnedMesh -#ifdef VERTEX_TANGENTS - out.world_tangent = mesh_functions::mesh2d_tangent_local_to_world( - model, - vertex.tangent - ); -#endif +#ifdef SKINNED -#ifdef VERTEX_COLORS - out.color = vertex.color; +#ifdef MESH_BINDGROUP_1 + @group(1) @binding(1) var joint_matrices: SkinnedMesh; +#else + @group(2) @binding(1) var joint_matrices: SkinnedMesh; #endif - return out; -} -@fragment -fn fragment( - in: VertexOutput, -) -> @location(0) vec4 { -#ifdef VERTEX_COLORS - var color = in.color; -#ifdef TONEMAP_IN_SHADER - color = tonemapping::tone_mapping(color, view.color_grading); -#endif - return color; -#else - return vec4(1.0, 0.0, 1.0, 1.0); -#endif -} -``` +fn skin_model( + indexes: vec4, + weights: vec4, +) -> mat4x4 { + return weights.x * joint_matrices.data[indexes.x] + + weights.y * joint_matrices.data[indexes.y] + + weights.z * joint_matrices.data[indexes.z] + + weights.w * joint_matrices.data[indexes.w]; +} -### crates/bevy_sprite/src/mesh2d/color_material +fn inverse_transpose_3x3m(in: mat3x3) -> mat3x3 { + let x = cross(in[1], in[2]); + let y = cross(in[2], in[0]); + let z = cross(in[0], in[1]); + let det = dot(in[2], z); + return mat3x3( + x / det, + y / det, + z / det + ); +} -```rust -#import bevy_sprite::{ - mesh2d_vertex_output::VertexOutput, - mesh2d_view_bindings::view, +fn skin_normals( + model: mat4x4, + normal: vec3, +) -> vec3 { + return normalize( + inverse_transpose_3x3m( + mat3x3( + model[0].xyz, + model[1].xyz, + model[2].xyz + ) + ) * normal + ); } -#ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping #endif +#define_import_path bevy_pbr::mesh_view_bindings -struct ColorMaterial { - color: vec4, - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
- flags: u32, -}; -const COLOR_MATERIAL_FLAGS_TEXTURE_BIT: u32 = 1u; - -@group(2) @binding(0) var material: ColorMaterial; -@group(2) @binding(1) var texture: texture_2d; -@group(2) @binding(2) var texture_sampler: sampler; +#import bevy_pbr::mesh_view_types as types +#import bevy_render::view View +#import bevy_render::globals Globals -@fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { - var output_color: vec4 = material.color; -#ifdef VERTEX_COLORS - output_color = output_color * mesh.color; +@group(0) @binding(0) var view: View; +@group(0) @binding(1) var lights: types::Lights; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +@group(0) @binding(2) var point_shadow_textures: texture_depth_cube; +#else +@group(0) @binding(2) var point_shadow_textures: texture_depth_cube_array; #endif - if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { - output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); - } -#ifdef TONEMAP_IN_SHADER - output_color = tonemapping::tone_mapping(output_color, view.color_grading); +@group(0) @binding(3) var point_shadow_textures_sampler: sampler_comparison; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d; +#else +@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d_array; #endif - return output_color; -} - -``` +@group(0) @binding(5) var directional_shadow_textures_sampler: sampler_comparison; -### crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 +@group(0) @binding(6) var point_lights: types::PointLights; +@group(0) @binding(7) var cluster_light_index_lists: types::ClusterLightIndexLists; +@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; +#else +@group(0) @binding(6) var point_lights: types::PointLights; +@group(0) @binding(7) var cluster_light_index_lists: types::ClusterLightIndexLists; +@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; +#endif -```rust -#define_import_path bevy_sprite::mesh2d_view_bindings +@group(0) @binding(9) var globals: Globals; +@group(0) @binding(10) var fog: types::Fog; -#import bevy_render::view::View -#import bevy_render::globals::Globals +@group(0) @binding(11) var screen_space_ambient_occlusion_texture: texture_2d; -@group(0) @binding(0) var view: View; +@group(0) @binding(12) var environment_map_diffuse: texture_cube; +@group(0) @binding(13) var environment_map_specular: texture_cube; +@group(0) @binding(14) var environment_map_sampler: sampler; -@group(0) @binding(1) var globals: Globals; +@group(0) @binding(15) var dt_lut_texture: texture_3d; +@group(0) @binding(16) var dt_lut_sampler: sampler; -``` +#ifdef MULTISAMPLED +@group(0) @binding(17) var depth_prepass_texture: texture_depth_multisampled_2d; +@group(0) @binding(18) var normal_prepass_texture: texture_multisampled_2d; +@group(0) @binding(19) var motion_vector_prepass_texture: texture_multisampled_2d; +#else +@group(0) @binding(17) var depth_prepass_texture: texture_depth_2d; +@group(0) @binding(18) var normal_prepass_texture: texture_2d; +@group(0) @binding(19) var motion_vector_prepass_texture: texture_2d; +#endif +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_functions get_model_matrix, mesh_position_local_to_clip +#import bevy_pbr::morph -### crates/bevy_sprite/src/mesh2d/mesh2d_types +#ifdef SKINNED + #import bevy_pbr::skinning +#endif -```rust -#define_import_path bevy_sprite::mesh2d_types +struct Vertex { + 
@builtin(instance_index) instance_index: u32, + @location(0) position: vec3, +#ifdef SKINNED + @location(5) joint_indexes: vec4, + @location(6) joint_weights: vec4, +#endif +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif +}; -struct Mesh2d { - // Affine 4x3 matrix transposed to 3x4 - // Use bevy_render::maths::affine3_to_square to unpack - model: mat3x4, - // 3x3 matrix packed in mat2x4 and f32 as: - // [0].xyz, [1].x, - // [1].yz, [2].xy - // [2].z - // Use bevy_render::maths::mat2x4_f32_to_mat3x3_unpack to unpack - inverse_transpose_model_a: mat2x4, - inverse_transpose_model_b: f32, - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. - flags: u32, +struct VertexOutput { + @builtin(position) clip_position: vec4, }; -``` -### crates/bevy_sprite/src/mesh2d/mesh2d_view_types +#ifdef MORPH_TARGETS +fn morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = bevy_pbr::morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = bevy_pbr::morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::position_offset, i); + } + return vertex; +} +#endif -```rust -#define_import_path bevy_sprite::mesh2d_view_types +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { -#import bevy_render::view -#import bevy_render::globals +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); +#else + var vertex = vertex_no_morph; +#endif -``` +#ifdef SKINNED + let model = bevy_pbr::skinning::skin_model(vertex.joint_indexes, vertex.joint_weights); +#else + let model = get_model_matrix(vertex.instance_index); +#endif -### crates/bevy_sprite/src/mesh2d/mesh2d_vertex_output + var out: VertexOutput; + out.clip_position = mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); + return out; +} -```rust -#define_import_path bevy_sprite::mesh2d_vertex_output +@fragment +fn fragment() -> @location(0) vec4 { + return vec4(1.0, 1.0, 1.0, 1.0); +} +#define_import_path bevy_pbr::fragment -struct VertexOutput { - // this is `clip position` when the struct is used as a vertex stage output +#import bevy_pbr::pbr_functions as pbr_functions +#import bevy_pbr::pbr_bindings as pbr_bindings +#import bevy_pbr::pbr_types as pbr_types +#import bevy_pbr::prepass_utils + +#import bevy_pbr::mesh_vertex_output VertexOutput +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_view_bindings view, fog, screen_space_ambient_occlusion_texture +#import bevy_pbr::mesh_view_types FOG_MODE_OFF +#import bevy_core_pipeline::tonemapping screen_space_dither, powsafe, tone_mapping +#import bevy_pbr::parallax_mapping parallaxed_uv + +#import bevy_pbr::prepass_utils + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION +#import bevy_pbr::gtao_utils gtao_multibounce +#endif + +@fragment +fn fragment( + in: VertexOutput, + @builtin(front_facing) is_front: bool, +) -> @location(0) vec4 { + var output_color: vec4 = pbr_bindings::material.base_color; + + let is_orthographic = view.projection[3].w == 1.0; + let V = pbr_functions::calculate_view(in.world_position, is_orthographic); +#ifdef VERTEX_UVS + var uv = in.uv; +#ifdef VERTEX_TANGENTS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT) != 0u) { + let N = in.world_normal; + let T = in.world_tangent.xyz; + let B = in.world_tangent.w * cross(N, T); + // Transform V from fragment to camera in world space to tangent space. 
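// Illustrative note: dotting V with T, B and N multiplies it by the transpose of the TBN
// matrix; for an orthonormal tangent frame that transpose is the inverse, so Vt is the view
// vector expressed in tangent space, as expected by the parallax mapping below.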
+ let Vt = vec3(dot(V, T), dot(V, B), dot(V, N)); + uv = parallaxed_uv( + pbr_bindings::material.parallax_depth_scale, + pbr_bindings::material.max_parallax_layer_count, + pbr_bindings::material.max_relief_mapping_search_steps, + uv, + // Flip the direction of Vt to go toward the surface to make the + // parallax mapping algorithm easier to understand and reason + // about. + -Vt, + ); + } +#endif +#endif + +#ifdef VERTEX_COLORS + output_color = output_color * in.color; +#endif +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { + output_color = output_color * texture_sample_bias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); + } +#endif + + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + // Prepare a 'processed' StandardMaterial by sampling all textures to resolve + // the material members + var pbr_input: pbr_functions::PbrInput; + + pbr_input.material.base_color = output_color; + pbr_input.material.reflectance = pbr_bindings::material.reflectance; + pbr_input.material.flags = pbr_bindings::material.flags; + pbr_input.material.alpha_cutoff = pbr_bindings::material.alpha_cutoff; + + // TODO use .a for exposure compensation in HDR + var emissive: vec4 = pbr_bindings::material.emissive; +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { + emissive = vec4(emissive.rgb * texture_sample_bias(pbr_bindings::emissive_texture, pbr_bindings::emissive_sampler, uv, view.mip_bias).rgb, 1.0); + } +#endif + pbr_input.material.emissive = emissive; + + var metallic: f32 = pbr_bindings::material.metallic; + var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness; +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { + let metallic_roughness = texture_sample_bias(pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, uv, view.mip_bias); + // Sampling from GLTF standard channels for now + metallic = metallic * metallic_roughness.b; + perceptual_roughness = perceptual_roughness * metallic_roughness.g; + } +#endif + pbr_input.material.metallic = metallic; + pbr_input.material.perceptual_roughness = perceptual_roughness; + + // TODO: Split into diffuse/specular occlusion? 
+ var occlusion: vec3 = vec3(1.0); +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { + occlusion = vec3(texture_sample_bias(pbr_bindings::occlusion_texture, pbr_bindings::occlusion_sampler, uv, view.mip_bias).r); + } +#endif +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION + let ssao = texture_load(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; + let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); + occlusion = min(occlusion, ssao_multibounce); +#endif + pbr_input.occlusion = occlusion; + + pbr_input.frag_coord = in.position; + pbr_input.world_position = in.world_position; + + pbr_input.world_normal = pbr_functions::prepare_world_normal( + in.world_normal, + (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u, + is_front, + ); + + pbr_input.is_orthographic = is_orthographic; + +#ifdef LOAD_PREPASS_NORMALS + pbr_input.N = bevy_pbr::prepass_utils::prepass_normal(in.position, 0u); +#else + pbr_input.N = pbr_functions::apply_normal_mapping( + pbr_bindings::material.flags, + pbr_input.world_normal, +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + in.world_tangent, +#endif +#endif +#ifdef VERTEX_UVS + uv, +#endif + view.mip_bias, + ); +#endif + + pbr_input.V = V; + pbr_input.occlusion = occlusion; + + pbr_input.flags = mesh[in.instance_index].flags; + + output_color = pbr_functions::pbr(pbr_input); + } else { + output_color = pbr_functions::alpha_discard(pbr_bindings::material, output_color); + } + + // fog + if (fog.mode != FOG_MODE_OFF && (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { + output_color = pbr_functions::apply_fog(fog, output_color, in.world_position.xyz, view.world_position.xyz); + } + +#ifdef TONEMAP_IN_SHADER + output_color = tone_mapping(output_color, view.color_grading); +#ifdef DEBAND_DITHER + var output_rgb = output_color.rgb; + output_rgb = powsafe(output_rgb, 1.0 / 2.2); + output_rgb = output_rgb + screen_space_dither(in.position.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. 
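// Illustrative note: the 1.0 / 2.2 and 2.2 powers are a cheap gamma approximation of the sRGB
// transfer function; the dither is added in the gamma-encoded domain so its amplitude is
// roughly perceptually uniform, and the line below converts the result back to linear.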
+ output_rgb = powsafe(output_rgb, 2.2); + output_color = vec4(output_rgb, output_color.a); +#endif +#endif +#ifdef PREMULTIPLY_ALPHA + output_color = pbr_functions::premultiply_alpha(pbr_bindings::material.flags, output_color); +#endif + return output_color; +} +#define_import_path bevy_pbr::pbr_bindings + +#import bevy_pbr::pbr_types StandardMaterial + +@group(1) @binding(0) var material: StandardMaterial; +@group(1) @binding(1) var base_color_texture: texture_2d; +@group(1) @binding(2) var base_color_sampler: sampler; +@group(1) @binding(3) var emissive_texture: texture_2d; +@group(1) @binding(4) var emissive_sampler: sampler; +@group(1) @binding(5) var metallic_roughness_texture: texture_2d; +@group(1) @binding(6) var metallic_roughness_sampler: sampler; +@group(1) @binding(7) var occlusion_texture: texture_2d; +@group(1) @binding(8) var occlusion_sampler: sampler; +@group(1) @binding(9) var normal_map_texture: texture_2d; +@group(1) @binding(10) var normal_map_sampler: sampler; +@group(1) @binding(11) var depth_map_texture: texture_2d; +@group(1) @binding(12) var depth_map_sampler: sampler; +#define_import_path bevy_pbr::mesh_vertex_output + +struct VertexOutput { + // this is `clip position` when the struct is used as a vertex stage output // and `frag coord` when used as a fragment stage input @builtin(position) position: vec4, @location(0) world_position: vec4, @location(1) world_normal: vec3, + #ifdef VERTEX_UVS @location(2) uv: vec2, + #endif #ifdef VERTEX_TANGENTS @location(3) world_tangent: vec4, #endif #ifdef VERTEX_COLORS @location(4) color: vec4, #endif + #ifdef VERTEX_OUTPUT_INSTANCE_INDEX + @location(5) @interpolate(flat) instance_index: u32, + #endif } +#define_import_path bevy_pbr::mesh_bindings -``` - -### crates/bevy_sprite/src/mesh2d/mesh2d_bindings - -```rust -#define_import_path bevy_sprite::mesh2d_bindings +#import bevy_pbr::mesh_types Mesh -#import bevy_sprite::mesh2d_types::Mesh2d +#ifdef MESH_BINDGROUP_1 #ifdef PER_OBJECT_BUFFER_BATCH_SIZE -@group(1) @binding(0) var mesh: array; +@group(1) @binding(0) var mesh: array; #else -@group(1) @binding(0) var mesh: array; +@group(1) @binding(0) var mesh: array; #endif // PER_OBJECT_BUFFER_BATCH_SIZE -``` +#else // MESH_BINDGROUP_1 -### crates/bevy_sprite/src/mesh2d/wireframe2d +#ifdef PER_OBJECT_BUFFER_BATCH_SIZE +@group(2) @binding(0) var mesh: array; +#else +@group(2) @binding(0) var mesh: array; +#endif // PER_OBJECT_BUFFER_BATCH_SIZE -```rust -#import bevy_sprite::mesh2d_vertex_output::VertexOutput +#endif // MESH_BINDGROUP_1 +#import bevy_pbr::mesh_functions as mesh_functions +#import bevy_pbr::skinning +#import bevy_pbr::morph +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_vertex_output VertexOutput +#import bevy_render::instance_index get_instance_index -struct WireframeMaterial { - color: vec4, +struct Vertex { + @builtin(instance_index) instance_index: u32, +#ifdef VERTEX_POSITIONS + @location(0) position: vec3, +#endif +#ifdef VERTEX_NORMALS + @location(1) normal: vec3, +#endif +#ifdef VERTEX_UVS + @location(2) uv: vec2, +#endif +#ifdef VERTEX_TANGENTS + @location(3) tangent: vec4, +#endif +#ifdef VERTEX_COLORS + @location(4) color: vec4, +#endif +#ifdef SKINNED + @location(5) joint_indices: vec4, + @location(6) joint_weights: vec4, +#endif +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif }; -@group(2) @binding(0) var material: WireframeMaterial; -@fragment -fn fragment(in: VertexOutput) -> @location(0) vec4 { - return material.color; +#ifdef MORPH_TARGETS +fn 
morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = bevy_pbr::morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = bevy_pbr::morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::position_offset, i); +#ifdef VERTEX_NORMALS + vertex.normal += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::normal_offset, i); +#endif +#ifdef VERTEX_TANGENTS + vertex.tangent += vec4(weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); +#endif + } + return vertex; } +#endif -``` - -### crates/bevy_ui/src/render/ui +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { + var out: VertexOutput; -```rust -#import bevy_render::view::View +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); +#else + var vertex = vertex_no_morph; +#endif -const TEXTURED = 1u; -const RIGHT_VERTEX = 2u; -const BOTTOM_VERTEX = 4u; -const BORDER: u32 = 8u; +#ifdef SKINNED + var model = bevy_pbr::skinning::skin_model(vertex.joint_indices, vertex.joint_weights); +#else + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 . + var model = mesh_functions::get_model_matrix(vertex_no_morph.instance_index); +#endif -fn enabled(flags: u32, mask: u32) -> bool { - return (flags & mask) != 0u; -} +#ifdef VERTEX_NORMALS +#ifdef SKINNED + out.world_normal = bevy_pbr::skinning::skin_normals(model, vertex.normal); +#else + out.world_normal = mesh_functions::mesh_normal_local_to_world( + vertex.normal, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + get_instance_index(vertex_no_morph.instance_index) + ); +#endif +#endif -@group(0) @binding(0) var view: View; +#ifdef VERTEX_POSITIONS + out.world_position = mesh_functions::mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); + out.position = mesh_functions::mesh_position_world_to_clip(out.world_position); +#endif -struct VertexOutput { - @location(0) uv: vec2, - @location(1) color: vec4, +#ifdef VERTEX_UVS + out.uv = vertex.uv; +#endif - @location(2) @interpolate(flat) size: vec2, - @location(3) @interpolate(flat) flags: u32, - @location(4) @interpolate(flat) radius: vec4, - @location(5) @interpolate(flat) border: vec4, +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_functions::mesh_tangent_local_to_world( + model, + vertex.tangent, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + get_instance_index(vertex_no_morph.instance_index) + ); +#endif - // Position relative to the center of the rectangle. - @location(6) point: vec2, - @builtin(position) position: vec4, -}; +#ifdef VERTEX_COLORS + out.color = vertex.color; +#endif -@vertex -fn vertex( - @location(0) vertex_position: vec3, - @location(1) vertex_uv: vec2, - @location(2) vertex_color: vec4, - @location(3) flags: u32, - - // x: top left, y: top right, z: bottom right, w: bottom left. - @location(4) radius: vec4, - - // x: left, y: top, z: right, w: bottom. 
- @location(5) border: vec4, - @location(6) size: vec2, -) -> VertexOutput { - var out: VertexOutput; - out.uv = vertex_uv; - out.position = view.view_proj * vec4(vertex_position, 1.0); - out.color = vertex_color; - out.flags = flags; - out.radius = radius; - out.size = size; - out.border = border; - var point = 0.49999 * size; - if (flags & RIGHT_VERTEX) == 0u { - point.x *= -1.; - } - if (flags & BOTTOM_VERTEX) == 0u { - point.y *= -1.; - } - out.point = point; +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + out.instance_index = get_instance_index(vertex_no_morph.instance_index); +#endif return out; } -@group(1) @binding(0) var sprite_texture: texture_2d; -@group(1) @binding(1) var sprite_sampler: sampler; - -// The returned value is the shortest distance from the given point to the boundary of the rounded -// box. -// -// Negative values indicate that the point is inside the rounded box, positive values that the point -// is outside, and zero is exactly on the boundary. -// -// Arguments: -// - `point` -> The function will return the distance from this point to the closest point on -// the boundary. -// - `size` -> The maximum width and height of the box. -// - `corner_radii` -> The radius of each rounded corner. Ordered counter clockwise starting -// top left: -// x: top left, y: top right, z: bottom right, w: bottom left. -fn sd_rounded_box(point: vec2, size: vec2, corner_radii: vec4) -> f32 { - // If 0.0 < y then select bottom left (w) and bottom right corner radius (z). - // Else select top left (x) and top right corner radius (y). - let rs = select(corner_radii.xy, corner_radii.wz, 0.0 < point.y); - // w and z are swapped so that both pairs are in left to right order, otherwise this second - // select statement would return the incorrect value for the bottom pair. - let radius = select(rs.x, rs.y, 0.0 < point.x); - // Vector from the corner closest to the point, to the point. - let corner_to_point = abs(point) - 0.5 * size; - // Vector from the center of the radius circle to the point. - let q = corner_to_point + radius; - // Length from center of the radius circle to the point, zeros a component if the point is not - // within the quadrant of the radius circle that is part of the curved corner. - let l = length(max(q, vec2(0.0))); - let m = min(max(q.x, q.y), 0.0); - return l + m - radius; +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifdef VERTEX_COLORS + return mesh.color; +#else + return vec4(1.0, 0.0, 1.0, 1.0); +#endif } +#define_import_path bevy_pbr::pbr_functions -fn sd_inset_rounded_box(point: vec2, size: vec2, radius: vec4, inset: vec4) -> f32 { - let inner_size = size - inset.xy - inset.zw; - let inner_center = inset.xy + 0.5 * inner_size - 0.5 * size; - let inner_point = point - inner_center; +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif - var r = radius; +#import bevy_pbr::pbr_types as pbr_types +#import bevy_pbr::pbr_bindings as pbr_bindings +#import bevy_pbr::mesh_view_bindings as view_bindings +#import bevy_pbr::mesh_view_types as mesh_view_types +#import bevy_pbr::lighting as lighting +#import bevy_pbr::clustered_forward as clustering +#import bevy_pbr::shadows as shadows +#import bevy_pbr::fog as fog +#import bevy_pbr::ambient as ambient +#ifdef ENVIRONMENT_MAP +#import bevy_pbr::environment_map +#endif - // Top left corner. 
- r.x = r.x - max(inset.x, inset.y); +#import bevy_pbr::mesh_bindings mesh +#import bevy_pbr::mesh_types MESH_FLAGS_SHADOW_RECEIVER_BIT - // Top right corner. - r.y = r.y - max(inset.z, inset.y); +fn alpha_discard(material: pbr_types::StandardMaterial, output_color: vec4) -> vec4 { + var color = output_color; + let alpha_mode = material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE { + // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0 + color.a = 1.0; + } - // Bottom right corner. - r.z = r.z - max(inset.z, inset.w); +#ifdef MAY_DISCARD + else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK { + if color.a >= material.alpha_cutoff { + // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque + color.a = 1.0; + } else { + // NOTE: output_color.a < in.material.alpha_cutoff should not be rendered + discard; + } + } +#endif - // Bottom left corner. - r.w = r.w - max(inset.x, inset.w); + return color; +} - let half_size = inner_size * 0.5; - let min_size = min(half_size.x, half_size.y); +fn prepare_world_normal( + world_normal: vec3, + double_sided: bool, + is_front: bool, +) -> vec3 { + var output: vec3 = world_normal; +#ifndef VERTEX_TANGENTS +#ifndef STANDARDMATERIAL_NORMAL_MAP + // NOTE: When NOT using normal-mapping, if looking at the back face of a double-sided + // material, the normal needs to be inverted. This is a branchless version of that. + output = (f32(!double_sided || is_front) * 2.0 - 1.0) * output; +#endif +#endif + return output; +} - r = min(max(r, vec4(0.0)), vec4(min_size)); +fn apply_normal_mapping( + standard_material_flags: u32, + world_normal: vec3, +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + world_tangent: vec4, +#endif +#endif +#ifdef VERTEX_UVS + uv: vec2, +#endif + mip_bias: f32, +) -> vec3 { + // NOTE: The mikktspace method of normal mapping explicitly requires that the world normal NOT + // be re-normalized in the fragment shader. This is primarily to match the way mikktspace + // bakes vertex tangents and normal maps so that this is the exact inverse. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ + var N: vec3 = world_normal; - return sd_rounded_box(inner_point, inner_size, r); -} +#ifdef VERTEX_TANGENTS +#ifdef STANDARDMATERIAL_NORMAL_MAP + // NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be + // normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the + // vertex tangent! Do not change this code unless you really know what you are doing. + // http://www.mikktspace.com/ + var T: vec3 = world_tangent.xyz; + var B: vec3 = world_tangent.w * cross(N, T); +#endif +#endif -fn draw(in: VertexOutput) -> vec4 { - let texture_color = textureSample(sprite_texture, sprite_sampler, in.uv); +#ifdef VERTEX_TANGENTS +#ifdef VERTEX_UVS +#ifdef STANDARDMATERIAL_NORMAL_MAP + // Nt is the tangent-space normal. + var Nt = texture_sample_bias(pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, uv, mip_bias).rgb; + if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u { + // Only use the xy components and derive z for 2-component normal maps. 
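// Illustrative note: a unit-length tangent-space normal satisfies x*x + y*y + z*z = 1.0, so z
// can be reconstructed as sqrt(1.0 - x*x - y*y), taking the positive root because tangent-space
// normals point away from the surface; e.g. Nt.xy = (0.6, 0.0) gives Nt.z = 0.8.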
+ Nt = vec3(Nt.rg * 2.0 - 1.0, 0.0); + Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y); + } else { + Nt = Nt * 2.0 - 1.0; + } + // Normal maps authored for DirectX require flipping the y component + if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u { + Nt.y = -Nt.y; + } + // NOTE: The mikktspace method of normal mapping applies maps the tangent-space normal from + // the normal map texture in this way to be an EXACT inverse of how the normal map baker + // calculates the normal maps so there is no error introduced. Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ + N = Nt.x * T + Nt.y * B + Nt.z * N; +#endif +#endif +#endif - // Only use the color sampled from the texture if the `TEXTURED` flag is enabled. - // This allows us to draw both textured and untextured shapes together in the same batch. - let color = select(in.color, in.color * texture_color, enabled(in.flags, TEXTURED)); + return normalize(N); +} - // Signed distances. The magnitude is the distance of the point from the edge of the shape. - // * Negative values indicate that the point is inside the shape. - // * Zero values indicate the point is on on the edge of the shape. - // * Positive values indicate the point is outside the shape. +// NOTE: Correctly calculates the view vector depending on whether +// the projection is orthographic or perspective. +fn calculate_view( + world_position: vec4, + is_orthographic: bool, +) -> vec3 { + var V: vec3; + if is_orthographic { + // Orthographic view vector + V = normalize(vec3(view_bindings::view.view_proj[0].z, view_bindings::view.view_proj[1].z, view_bindings::view.view_proj[2].z)); + } else { + // Only valid for a perpective projection + V = normalize(view_bindings::view.world_position.xyz - world_position.xyz); + } + return V; +} - // Signed distance from the exterior boundary. - let external_distance = sd_rounded_box(in.point, in.size, in.radius); +struct PbrInput { + material: pbr_types::StandardMaterial, + occlusion: vec3, + frag_coord: vec4, + world_position: vec4, + // Normalized world normal used for shadow mapping as normal-mapping is not used for shadow + // mapping + world_normal: vec3, + // Normalized normal-mapped world normal used for lighting + N: vec3, + // Normalized view vector in world space, pointing from the fragment world position toward the + // view world position + V: vec3, + is_orthographic: bool, + flags: u32, +}; - // Signed distance from the border's internal edge (the signed distance is negative if the point - // is inside the rect but not on the border). - // If the border size is set to zero, this is the same as as the external distance. - let internal_distance = sd_inset_rounded_box(in.point, in.size, in.radius, in.border); +// Creates a PbrInput with default values +fn pbr_input_new() -> PbrInput { + var pbr_input: PbrInput; - // Signed distance from the border (the intersection of the rect with its border). - // Points inside the border have negative signed distance. Any point outside the border, whether - // outside the outside edge, or inside the inner edge have positive signed distance. 
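// Illustrative note: in signed-distance terms max(a, b) intersects two regions and negating a
// distance takes the complement, so max(external_distance, -internal_distance) is the set of
// points inside the outer rounded box but outside the inset one, i.e. the border ring itself.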
- let border_distance = max(external_distance, -internal_distance); + pbr_input.material = pbr_types::standard_material_new(); + pbr_input.occlusion = vec3(1.0); - // The `fwidth` function returns an approximation of the rate of change of the signed distance - // value that is used to ensure that the smooth alpha transition created by smoothstep occurs - // over a range of distance values that is proportional to how quickly the distance is changing. - let fborder = fwidth(border_distance); - let fexternal = fwidth(external_distance); + pbr_input.frag_coord = vec4(0.0, 0.0, 0.0, 1.0); + pbr_input.world_position = vec4(0.0, 0.0, 0.0, 1.0); + pbr_input.world_normal = vec3(0.0, 0.0, 1.0); - if enabled(in.flags, BORDER) { - // The item is a border + pbr_input.is_orthographic = false; - // At external edges with no border, `border_distance` is equal to zero. - // This select statement ensures we only perform anti-aliasing where a non-zero width border - // is present, otherwise an outline about the external boundary would be drawn even without - // a border. - let t = 1. - select(step(0.0, border_distance), smoothstep(0.0, fborder, border_distance), external_distance < internal_distance); - return color.rgba * t; - } + pbr_input.N = vec3(0.0, 0.0, 1.0); + pbr_input.V = vec3(1.0, 0.0, 0.0); - // The item is a rectangle, draw normally with anti-aliasing at the edges. - let t = 1. - smoothstep(0.0, fexternal, external_distance); - return color.rgba * t; -} + pbr_input.flags = 0u; -@fragment -fn fragment(in: VertexOutput) -> @location(0) vec4 { - return draw(in); + return pbr_input; } -``` +#ifndef PREPASS_FRAGMENT +fn pbr( + in: PbrInput, +) -> vec4 { + var output_color: vec4 = in.material.base_color; -### crates/bevy_ui/src/render/ui_material + // TODO use .a for exposure compensation in HDR + let emissive = in.material.emissive; -```rust -#import bevy_render::{ - view::View, - globals::Globals, -} -#import bevy_ui::ui_vertex_output::UiVertexOutput - -@group(0) @binding(0) -var view: View; -@group(0) @binding(1) -var globals: Globals; + // calculate non-linear roughness from linear perceptualRoughness + let metallic = in.material.metallic; + let perceptual_roughness = in.material.perceptual_roughness; + let roughness = lighting::perceptual_roughness_to_roughness(perceptual_roughness); -@vertex -fn vertex( - @location(0) vertex_position: vec3, - @location(1) vertex_uv: vec2, - @location(2) size: vec2, - @location(3) border_widths: vec4, -) -> UiVertexOutput { - var out: UiVertexOutput; - out.uv = vertex_uv; - out.position = view.view_proj * vec4(vertex_position, 1.0); - out.size = size; - out.border_widths = border_widths; - return out; -} + let occlusion = in.occlusion; -@fragment -fn fragment(in: UiVertexOutput) -> @location(0) vec4 { - return vec4(1.0); -} + output_color = alpha_discard(in.material, output_color); -``` + // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" + let NdotV = max(dot(in.N, in.V), 0.0001); -### crates/bevy_ui/src/render/ui_vertex_output + // Remapping [0,1] reflectance to F0 + // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping + let reflectance = in.material.reflectance; + let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic; -```rust -#define_import_path bevy_ui::ui_vertex_output + // Diffuse strength inversely related to metallicity + let diffuse_color = output_color.rgb * (1.0 - metallic); -// The Vertex output of the default vertex shader for the 
Ui Material pipeline. -struct UiVertexOutput { - @location(0) uv: vec2, - // The size of the borders in UV space. Order is Left, Right, Top, Bottom. - @location(1) border_widths: vec4, - // The size of the node in pixels. Order is width, height. - @location(2) @interpolate(flat) size: vec2, - @builtin(position) position: vec4, -}; + let R = reflect(-in.V, in.N); -``` + let f_ab = lighting::F_AB(perceptual_roughness, NdotV); -### crates/bevy_core_pipeline/src/bloom/bloom + var direct_light: vec3 = vec3(0.0); -```rust -// Bloom works by creating an intermediate texture with a bunch of mip levels, each half the size of the previous. -// You then downsample each mip (starting with the original texture) to the lower resolution mip under it, going in order. -// You then upsample each mip (starting from the smallest mip) and blend with the higher resolution mip above it (ending on the original texture). -// -// References: -// * [COD] - Next Generation Post Processing in Call of Duty - http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare -// * [PBB] - Physically Based Bloom - https://learnopengl.com/Guest-Articles/2022/Phys.-Based-Bloom + let view_z = dot(vec4( + view_bindings::view.inverse_view[0].z, + view_bindings::view.inverse_view[1].z, + view_bindings::view.inverse_view[2].z, + view_bindings::view.inverse_view[3].z + ), in.world_position); + let cluster_index = clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic); + let offset_and_counts = clustering::unpack_offset_and_counts(cluster_index); -struct BloomUniforms { - threshold_precomputations: vec4, - viewport: vec4, - aspect: f32, -}; + // Point lights (direct) + for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) { + let light_id = clustering::get_light_id(i); + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::point_lights.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_point_shadow(light_id, in.world_position, in.world_normal); + } + let light_contrib = lighting::point_light(in.world_position.xyz, light_id, roughness, NdotV, in.N, in.V, R, F0, f_ab, diffuse_color); + direct_light += light_contrib * shadow; + } -@group(0) @binding(0) var input_texture: texture_2d; -@group(0) @binding(1) var s: sampler; + // Spot lights (direct) + for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) { + let light_id = clustering::get_light_id(i); -@group(0) @binding(2) var uniforms: BloomUniforms; + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::point_lights.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_spot_shadow(light_id, in.world_position, in.world_normal); + } + let light_contrib = lighting::spot_light(in.world_position.xyz, light_id, roughness, NdotV, in.N, in.V, R, F0, f_ab, diffuse_color); + direct_light += light_contrib * shadow; + } -#ifdef FIRST_DOWNSAMPLE -// https://catlikecoding.com/unity/tutorials/advanced-rendering/bloom/#3.4 -fn soft_threshold(color: vec3) -> vec3 { - let brightness = max(color.r, max(color.g, color.b)); - var softness = brightness - uniforms.threshold_precomputations.y; - softness = clamp(softness, 0.0, uniforms.threshold_precomputations.z); - softness = softness * softness * 
uniforms.threshold_precomputations.w; - var contribution = max(brightness - uniforms.threshold_precomputations.x, softness); - contribution /= max(brightness, 0.00001); // Prevent division by 0 - return color * contribution; -} + // directional lights (direct) + let n_directional_lights = view_bindings::lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::lights.directional_lights[i].flags & mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_directional_shadow(i, in.world_position, in.world_normal, view_z); + } + var light_contrib = lighting::directional_light(i, roughness, NdotV, in.N, in.V, R, F0, f_ab, diffuse_color); +#ifdef DIRECTIONAL_LIGHT_SHADOW_MAP_DEBUG_CASCADES + light_contrib = shadows::cascade_debug_visualization(light_contrib, i, view_z); #endif + direct_light += light_contrib * shadow; + } -// luminance coefficients from Rec. 709. -// https://en.wikipedia.org/wiki/Rec._709 -fn tonemapping_luminance(v: vec3) -> f32 { - return dot(v, vec3(0.2126, 0.7152, 0.0722)); -} - -fn rgb_to_srgb_simple(color: vec3) -> vec3 { - return pow(color, vec3(1.0 / 2.2)); -} - -// http://graphicrants.blogspot.com/2013/12/tone-mapping.html -fn karis_average(color: vec3) -> f32 { - // Luminance calculated by gamma-correcting linear RGB to non-linear sRGB using pow(color, 1.0 / 2.2) - // and then calculating luminance based on Rec. 709 color primaries. - let luma = tonemapping_luminance(rgb_to_srgb_simple(color)) / 4.0; - return 1.0 / (1.0 + luma); -} - -// [COD] slide 153 -fn sample_input_13_tap(uv: vec2) -> vec3 { - let a = textureSample(input_texture, s, uv, vec2(-2, 2)).rgb; - let b = textureSample(input_texture, s, uv, vec2(0, 2)).rgb; - let c = textureSample(input_texture, s, uv, vec2(2, 2)).rgb; - let d = textureSample(input_texture, s, uv, vec2(-2, 0)).rgb; - let e = textureSample(input_texture, s, uv).rgb; - let f = textureSample(input_texture, s, uv, vec2(2, 0)).rgb; - let g = textureSample(input_texture, s, uv, vec2(-2, -2)).rgb; - let h = textureSample(input_texture, s, uv, vec2(0, -2)).rgb; - let i = textureSample(input_texture, s, uv, vec2(2, -2)).rgb; - let j = textureSample(input_texture, s, uv, vec2(-1, 1)).rgb; - let k = textureSample(input_texture, s, uv, vec2(1, 1)).rgb; - let l = textureSample(input_texture, s, uv, vec2(-1, -1)).rgb; - let m = textureSample(input_texture, s, uv, vec2(1, -1)).rgb; + // Ambient light (indirect) + var indirect_light = ambient::ambient_light(in.world_position, in.N, in.V, NdotV, diffuse_color, F0, perceptual_roughness, occlusion); -#ifdef FIRST_DOWNSAMPLE - // [COD] slide 168 - // - // The first downsample pass reads from the rendered frame which may exhibit - // 'fireflies' (individual very bright pixels) that should not cause the bloom effect. - // - // The first downsample uses a firefly-reduction method proposed by Brian Karis - // which takes a weighted-average of the samples to limit their luma range to [0, 1]. - // This implementation matches the LearnOpenGL article [PBB]. 
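// Editor's note: a standalone sketch, not part of the patch. It factors out
// the metallic-workflow remapping used near the top of `pbr()` above (see the
// Filament parameterization link there): dielectrics get a scalar F0 of
// 0.16 * reflectance^2 (0.04 at the default reflectance of 0.5), metals take
// F0 from the base color, and the diffuse term fades out as metallic rises.
// The struct and function names are illustrative, not part of bevy_pbr.
struct RemappedSurface {
    f0: vec3<f32>,
    diffuse_color: vec3<f32>,
}

fn remap_metallic_workflow(base_color: vec3<f32>, metallic: f32, reflectance: f32) -> RemappedSurface {
    var surface: RemappedSurface;
    surface.f0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + base_color * metallic;
    surface.diffuse_color = base_color * (1.0 - metallic);
    return surface;
}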
- var group0 = (a + b + d + e) * (0.125f / 4.0f); - var group1 = (b + c + e + f) * (0.125f / 4.0f); - var group2 = (d + e + g + h) * (0.125f / 4.0f); - var group3 = (e + f + h + i) * (0.125f / 4.0f); - var group4 = (j + k + l + m) * (0.5f / 4.0f); - group0 *= karis_average(group0); - group1 *= karis_average(group1); - group2 *= karis_average(group2); - group3 *= karis_average(group3); - group4 *= karis_average(group4); - return group0 + group1 + group2 + group3 + group4; -#else - var sample = (a + c + g + i) * 0.03125; - sample += (b + d + f + h) * 0.0625; - sample += (e + j + k + l + m) * 0.125; - return sample; + // Environment map light (indirect) +#ifdef ENVIRONMENT_MAP + let environment_light = bevy_pbr::environment_map::environment_map_light(perceptual_roughness, roughness, diffuse_color, NdotV, f_ab, in.N, R, F0); + indirect_light += (environment_light.diffuse * occlusion) + environment_light.specular; #endif -} - -// [COD] slide 162 -fn sample_input_3x3_tent(uv: vec2) -> vec3 { - // Radius. Empirically chosen by and tweaked from the LearnOpenGL article. - let x = 0.004 / uniforms.aspect; - let y = 0.004; - - let a = textureSample(input_texture, s, vec2(uv.x - x, uv.y + y)).rgb; - let b = textureSample(input_texture, s, vec2(uv.x, uv.y + y)).rgb; - let c = textureSample(input_texture, s, vec2(uv.x + x, uv.y + y)).rgb; - let d = textureSample(input_texture, s, vec2(uv.x - x, uv.y)).rgb; - let e = textureSample(input_texture, s, vec2(uv.x, uv.y)).rgb; - let f = textureSample(input_texture, s, vec2(uv.x + x, uv.y)).rgb; + let emissive_light = emissive.rgb * output_color.a; - let g = textureSample(input_texture, s, vec2(uv.x - x, uv.y - y)).rgb; - let h = textureSample(input_texture, s, vec2(uv.x, uv.y - y)).rgb; - let i = textureSample(input_texture, s, vec2(uv.x + x, uv.y - y)).rgb; + // Total light + output_color = vec4( + direct_light + indirect_light + emissive_light, + output_color.a + ); - var sample = e * 0.25; - sample += (b + d + f + h) * 0.125; - sample += (a + c + g + i) * 0.0625; + output_color = clustering::cluster_debug_visualization( + output_color, + view_z, + in.is_orthographic, + offset_and_counts, + cluster_index, + ); - return sample; + return output_color; } +#endif // PREPASS_FRAGMENT -#ifdef FIRST_DOWNSAMPLE -@fragment -fn downsample_first(@location(0) output_uv: vec2) -> @location(0) vec4 { - let sample_uv = uniforms.viewport.xy + output_uv * uniforms.viewport.zw; - var sample = sample_input_13_tap(sample_uv); - // Lower bound of 0.0001 is to avoid propagating multiplying by 0.0 through the - // downscaling and upscaling which would result in black boxes. - // The upper bound is to prevent NaNs. - // with f32::MAX (E+38) Chrome fails with ":value 340282346999999984391321947108527833088.0 cannot be represented as 'f32'" - sample = clamp(sample, vec3(0.0001), vec3(3.40282347E+37)); +#ifndef PREPASS_FRAGMENT +fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_world_position: vec3, view_world_position: vec3) -> vec4 { + let view_to_world = fragment_world_position.xyz - view_world_position.xyz; -#ifdef USE_THRESHOLD - sample = soft_threshold(sample); -#endif + // `length()` is used here instead of just `view_to_world.z` since that produces more + // high quality results, especially for denser/smaller fogs. 
we get a "curved" + // fog shape that remains consistent with camera rotation, instead of a "linear" + // fog shape that looks a bit fake + let distance = length(view_to_world); - return vec4(sample, 1.0); -} -#endif + var scattering = vec3(0.0); + if fog_params.directional_light_color.a > 0.0 { + let view_to_world_normalized = view_to_world / distance; + let n_directional_lights = view_bindings::lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + let light = view_bindings::lights.directional_lights[i]; + scattering += pow( + max( + dot(view_to_world_normalized, light.direction_to_light), + 0.0 + ), + fog_params.directional_light_exponent + ) * light.color.rgb; + } + } -@fragment -fn downsample(@location(0) uv: vec2) -> @location(0) vec4 { - return vec4(sample_input_13_tap(uv), 1.0); + if fog_params.mode == mesh_view_types::FOG_MODE_LINEAR { + return fog::linear_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL { + return fog::exponential_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL_SQUARED { + return fog::exponential_squared_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_ATMOSPHERIC { + return fog::atmospheric_fog(fog_params, input_color, distance, scattering); + } else { + return input_color; + } } +#endif // PREPASS_FRAGMENT -@fragment -fn upsample(@location(0) uv: vec2) -> @location(0) vec4 { - return vec4(sample_input_3x3_tent(uv), 1.0); +#ifdef PREMULTIPLY_ALPHA +fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4 { +// `Blend`, `Premultiplied` and `Alpha` all share the same `BlendState`. Depending +// on the alpha mode, we premultiply the color channels by the alpha channel value, +// (and also optionally replace the alpha value with 0.0) so that the result produces +// the desired blend mode when sent to the blending operation. +#ifdef BLEND_PREMULTIPLIED_ALPHA + // For `BlendState::PREMULTIPLIED_ALPHA_BLENDING` the blend function is: + // + // result = 1 * src_color + (1 - src_alpha) * dst_color + let alpha_mode = standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD { + // Here, we premultiply `src_color` by `src_alpha`, and replace `src_alpha` with 0.0: + // + // src_color *= src_alpha + // src_alpha = 0.0 + // + // We end up with: + // + // result = 1 * (src_alpha * src_color) + (1 - 0) * dst_color + // result = src_alpha * src_color + 1 * dst_color + // + // Which is the blend operation for additive blending + return vec4(color.rgb * color.a, 0.0); + } else { + // Here, we don't do anything, so that we get premultiplied alpha blending. 
(As expected) + return color.rgba; + } +#endif +// `Multiply` uses its own `BlendState`, but we still need to premultiply here in the +// shader so that we get correct results as we tweak the alpha channel +#ifdef BLEND_MULTIPLY + // The blend function is: + // + // result = dst_color * src_color + (1 - src_alpha) * dst_color + // + // We premultiply `src_color` by `src_alpha`: + // + // src_color *= src_alpha + // + // We end up with: + // + // result = dst_color * (src_color * src_alpha) + (1 - src_alpha) * dst_color + // result = src_alpha * (src_color * dst_color) + (1 - src_alpha) * dst_color + // + // Which is the blend operation for multiplicative blending with arbitrary mixing + // controlled by the source alpha channel + return vec4(color.rgb * color.a, color.a); +#endif } +#endif +#import bevy_pbr::prepass_bindings +#import bevy_pbr::mesh_functions +#import bevy_pbr::skinning +#import bevy_pbr::morph +#import bevy_pbr::mesh_bindings mesh +#import bevy_render::instance_index get_instance_index -``` +// Most of these attributes are not used in the default prepass fragment shader, but they are still needed so we can +// pass them to custom prepass shaders like pbr_prepass.wgsl. +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, -### crates/bevy_core_pipeline/src/fxaa/fxaa +#ifdef VERTEX_UVS + @location(1) uv: vec2, +#endif // VERTEX_UVS -```rust -// NVIDIA FXAA 3.11 -// Original source code by TIMOTHY LOTTES -// https://gist.github.com/kosua20/0c506b81b3812ac900048059d2383126 -// -// Cleaned version - https://github.com/kosua20/Rendu/blob/master/resources/common/shaders/screens/fxaa.frag -// -// Tweaks by mrDIMAS - https://github.com/FyroxEngine/Fyrox/blob/master/src/renderer/shaders/fxaa_fs.glsl +#ifdef NORMAL_PREPASS + @location(2) normal: vec3, +#ifdef VERTEX_TANGENTS + @location(3) tangent: vec4, +#endif // VERTEX_TANGENTS +#endif // NORMAL_PREPASS -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#ifdef SKINNED + @location(4) joint_indices: vec4, + @location(5) joint_weights: vec4, +#endif // SKINNED -@group(0) @binding(0) var screenTexture: texture_2d; -@group(0) @binding(1) var samp: sampler; +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif // MORPH_TARGETS +} -// Trims the algorithm from processing darks. -#ifdef EDGE_THRESH_MIN_LOW - const EDGE_THRESHOLD_MIN: f32 = 0.0833; -#endif +struct VertexOutput { + @builtin(position) clip_position: vec4, -#ifdef EDGE_THRESH_MIN_MEDIUM - const EDGE_THRESHOLD_MIN: f32 = 0.0625; -#endif +#ifdef VERTEX_UVS + @location(0) uv: vec2, +#endif // VERTEX_UVS -#ifdef EDGE_THRESH_MIN_HIGH - const EDGE_THRESHOLD_MIN: f32 = 0.0312; -#endif +#ifdef NORMAL_PREPASS + @location(1) world_normal: vec3, +#ifdef VERTEX_TANGENTS + @location(2) world_tangent: vec4, +#endif // VERTEX_TANGENTS +#endif // NORMAL_PREPASS -#ifdef EDGE_THRESH_MIN_ULTRA - const EDGE_THRESHOLD_MIN: f32 = 0.0156; -#endif +#ifdef MOTION_VECTOR_PREPASS + @location(3) world_position: vec4, + @location(4) previous_world_position: vec4, +#endif // MOTION_VECTOR_PREPASS -#ifdef EDGE_THRESH_MIN_EXTREME - const EDGE_THRESHOLD_MIN: f32 = 0.0078; -#endif +#ifdef DEPTH_CLAMP_ORTHO + @location(5) clip_position_unclamped: vec4, +#endif // DEPTH_CLAMP_ORTHO +} -// The minimum amount of local contrast required to apply algorithm. 
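// Editor's note: a standalone sketch, not part of the patch. It spells out the
// fixed-function blend equation that the `premultiply_alpha` paths above are
// written against. With `BlendState::PREMULTIPLIED_ALPHA_BLENDING` the blender
// computes 1 * src + (1 - src.a) * dst, so emitting (rgb * a, 0.0) from the
// ALPHA_MODE_ADD branch yields additive blending, while (rgb * a, a) yields
// ordinary "over" compositing. The helper below just evaluates that equation.
fn premultiplied_alpha_blend(src: vec4<f32>, dst: vec4<f32>) -> vec4<f32> {
    return src + (1.0 - src.a) * dst;
}

// For example, with an additive source the destination colour passes through
// unattenuated:
//   premultiplied_alpha_blend(vec4(src_rgb * src_a, 0.0), dst)
//     == vec4(src_rgb * src_a + dst.rgb, dst.a)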
-#ifdef EDGE_THRESH_LOW - const EDGE_THRESHOLD_MAX: f32 = 0.250; +#ifdef MORPH_TARGETS +fn morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = bevy_pbr::morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = bevy_pbr::morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::position_offset, i); +#ifdef VERTEX_NORMALS + vertex.normal += weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::normal_offset, i); #endif - -#ifdef EDGE_THRESH_MEDIUM - const EDGE_THRESHOLD_MAX: f32 = 0.166; +#ifdef VERTEX_TANGENTS + vertex.tangent += vec4(weight * bevy_pbr::morph::morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); #endif - -#ifdef EDGE_THRESH_HIGH - const EDGE_THRESHOLD_MAX: f32 = 0.125; + } + return vertex; +} #endif -#ifdef EDGE_THRESH_ULTRA - const EDGE_THRESHOLD_MAX: f32 = 0.063; -#endif +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { + var out: VertexOutput; -#ifdef EDGE_THRESH_EXTREME - const EDGE_THRESHOLD_MAX: f32 = 0.031; +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); +#else + var vertex = vertex_no_morph; #endif -const ITERATIONS: i32 = 12; //default is 12 -const SUBPIXEL_QUALITY: f32 = 0.75; -// #define QUALITY(q) ((q) < 5 ? 1.0 : ((q) > 5 ? ((q) < 10 ? 2.0 : ((q) < 11 ? 4.0 : 8.0)) : 1.5)) -fn QUALITY(q: i32) -> f32 { - switch (q) { - //case 0, 1, 2, 3, 4: { return 1.0; } - default: { return 1.0; } - case 5: { return 1.5; } - case 6, 7, 8, 9: { return 2.0; } - case 10: { return 4.0; } - case 11: { return 8.0; } - } -} +#ifdef SKINNED + var model = bevy_pbr::skinning::skin_model(vertex.joint_indices, vertex.joint_weights); +#else // SKINNED + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + var model = bevy_pbr::mesh_functions::get_model_matrix(vertex_no_morph.instance_index); +#endif // SKINNED -fn rgb2luma(rgb: vec3) -> f32 { - return sqrt(dot(rgb, vec3(0.299, 0.587, 0.114))); -} + out.clip_position = bevy_pbr::mesh_functions::mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); +#ifdef DEPTH_CLAMP_ORTHO + out.clip_position_unclamped = out.clip_position; + out.clip_position.z = min(out.clip_position.z, 1.0); +#endif // DEPTH_CLAMP_ORTHO -// Performs FXAA post-process anti-aliasing as described in the Nvidia FXAA white paper and the associated shader code. -@fragment -fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { - let resolution = vec2(textureDimensions(screenTexture)); - let inverseScreenSize = 1.0 / resolution.xy; - let texCoord = in.position.xy * inverseScreenSize; +#ifdef VERTEX_UVS + out.uv = vertex.uv; +#endif // VERTEX_UVS - let centerSample = textureSampleLevel(screenTexture, samp, texCoord, 0.0); - let colorCenter = centerSample.rgb; +#ifdef NORMAL_PREPASS +#ifdef SKINNED + out.world_normal = bevy_pbr::skinning::skin_normals(model, vertex.normal); +#else // SKINNED + out.world_normal = bevy_pbr::mesh_functions::mesh_normal_local_to_world( + vertex.normal, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
+ // See https://github.com/gfx-rs/naga/issues/2416 + get_instance_index(vertex_no_morph.instance_index) + ); +#endif // SKINNED - // Luma at the current fragment - let lumaCenter = rgb2luma(colorCenter); +#ifdef VERTEX_TANGENTS + out.world_tangent = bevy_pbr::mesh_functions::mesh_tangent_local_to_world( + model, + vertex.tangent, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + get_instance_index(vertex_no_morph.instance_index) + ); +#endif // VERTEX_TANGENTS +#endif // NORMAL_PREPASS - // Luma at the four direct neighbors of the current fragment. - let lumaDown = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(0, -1)).rgb); - let lumaUp = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(0, 1)).rgb); - let lumaLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, 0)).rgb); - let lumaRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, 0)).rgb); +#ifdef MOTION_VECTOR_PREPASS + out.world_position = bevy_pbr::mesh_functions::mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + out.previous_world_position = bevy_pbr::mesh_functions::mesh_position_local_to_world( + bevy_pbr::mesh_functions::get_previous_model_matrix(vertex_no_morph.instance_index), + vec4(vertex.position, 1.0) + ); +#endif // MOTION_VECTOR_PREPASS - // Find the maximum and minimum luma around the current fragment. - let lumaMin = min(lumaCenter, min(min(lumaDown, lumaUp), min(lumaLeft, lumaRight))); - let lumaMax = max(lumaCenter, max(max(lumaDown, lumaUp), max(lumaLeft, lumaRight))); + return out; +} - // Compute the delta. - let lumaRange = lumaMax - lumaMin; +#ifdef PREPASS_FRAGMENT +struct FragmentInput { +#ifdef VERTEX_UVS + @location(0) uv: vec2, +#endif // VERTEX_UVS - // If the luma variation is lower that a threshold (or if we are in a really dark area), we are not on an edge, don't perform any AA. - if (lumaRange < max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) { - return centerSample; - } +#ifdef NORMAL_PREPASS + @location(1) world_normal: vec3, +#endif // NORMAL_PREPASS - // Query the 4 remaining corners lumas. - let lumaDownLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, -1)).rgb); - let lumaUpRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, 1)).rgb); - let lumaUpLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, 1)).rgb); - let lumaDownRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, -1)).rgb); +#ifdef MOTION_VECTOR_PREPASS + @location(3) world_position: vec4, + @location(4) previous_world_position: vec4, +#endif // MOTION_VECTOR_PREPASS - // Combine the four edges lumas (using intermediary variables for future computations with the same values). 
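// Editor's note: a standalone sketch, not part of the patch. It isolates the
// motion-vector math used by the prepass fragment shader below: project the
// current and previous world positions with (unjittered) view-projection
// matrices, take the perspective-divided clip-space delta, scale by 0.5 to map
// the -2..2 clip-space range onto -1..1 in UV space, and flip y because UV v
// grows downward while clip-space y grows upward. Parameter names are
// illustrative.
fn uv_space_motion_vector(
    current_view_proj: mat4x4<f32>,
    previous_view_proj: mat4x4<f32>,
    world_position: vec4<f32>,
    previous_world_position: vec4<f32>,
) -> vec2<f32> {
    let current_clip = current_view_proj * world_position;
    let previous_clip = previous_view_proj * previous_world_position;
    let current_ndc = current_clip.xy / current_clip.w;
    let previous_ndc = previous_clip.xy / previous_clip.w;
    return (current_ndc - previous_ndc) * vec2<f32>(0.5, -0.5);
}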
- let lumaDownUp = lumaDown + lumaUp; - let lumaLeftRight = lumaLeft + lumaRight; +#ifdef DEPTH_CLAMP_ORTHO + @location(5) clip_position_unclamped: vec4, +#endif // DEPTH_CLAMP_ORTHO +} - // Same for corners - let lumaLeftCorners = lumaDownLeft + lumaUpLeft; - let lumaDownCorners = lumaDownLeft + lumaDownRight; +struct FragmentOutput { +#ifdef NORMAL_PREPASS + @location(0) normal: vec4, +#endif // NORMAL_PREPASS + +#ifdef MOTION_VECTOR_PREPASS + @location(1) motion_vector: vec2, +#endif // MOTION_VECTOR_PREPASS + +#ifdef DEPTH_CLAMP_ORTHO + @builtin(frag_depth) frag_depth: f32, +#endif // DEPTH_CLAMP_ORTHO +} + +@fragment +fn fragment(in: FragmentInput) -> FragmentOutput { + var out: FragmentOutput; + +#ifdef NORMAL_PREPASS + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); +#endif + +#ifdef DEPTH_CLAMP_ORTHO + out.frag_depth = in.clip_position_unclamped.z; +#endif // DEPTH_CLAMP_ORTHO + +#ifdef MOTION_VECTOR_PREPASS + let clip_position_t = bevy_pbr::prepass_bindings::view.unjittered_view_proj * in.world_position; + let clip_position = clip_position_t.xy / clip_position_t.w; + let previous_clip_position_t = bevy_pbr::prepass_bindings::previous_view_proj * in.previous_world_position; + let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; + // These motion vectors are used as offsets to UV positions and are stored + // in the range -1,1 to allow offsetting from the one corner to the + // diagonally-opposite corner in UV coordinates, in either direction. + // A difference between diagonally-opposite corners of clip space is in the + // range -2,2, so this needs to be scaled by 0.5. And the V direction goes + // down where clip space y goes up, so y needs to be flipped. + out.motion_vector = (clip_position - previous_clip_position) * vec2(0.5, -0.5); +#endif // MOTION_VECTOR_PREPASS + + return out; +} +#endif // PREPASS_FRAGMENT +#define_import_path bevy_pbr::prepass_bindings +#import bevy_render::view View +#import bevy_render::globals Globals +#import bevy_pbr::mesh_types + +@group(0) @binding(0) var view: View; +@group(0) @binding(1) var globals: Globals; + +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(2) var previous_view_proj: mat4x4; +#endif // MOTION_VECTOR_PREPASS + +// Material bindings will be in @group(1) +#import bevy_pbr::mesh_bindings mesh +#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput + +@group(0) @binding(0) var in_texture: texture_2d; +@group(0) @binding(1) var in_sampler: sampler; + +@fragment +fn fs_main(in: FullscreenVertexOutput) -> @location(0) vec4 { + return texture_sample(in_texture, in_sampler, in.uv); +} +// Copyright (c) 2022 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput + +struct CASUniforms { + sharpness: f32, +}; + +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var samp: sampler; +@group(0) @binding(2) var uniforms: CASUniforms; + +// This is set at the limit of providing unnatural results for sharpening. +const FSR_RCAS_LIMIT = 0.1875; +// -4.0 instead of -1.0 to avoid issues with MSAA. +const peak_c = vec2(10.0, -40.0); + +// Robust Contrast Adaptive Sharpening (RCAS) +// Based on the following implementation: +// https://github.com/GPUOpen-Effects/FidelityFX-FSR2/blob/ea97a113b0f9cadf519fbcff315cc539915a3acd/src/ffx-fsr2-api/shaders/ffx_fsr1.h#L672 +// RCAS is based on the following logic. +// RCAS uses a 5 tap filter in a cross pattern (same as CAS), +// W b +// W 1 W for taps d e f +// W h +// Where 'W' is the negative lobe weight. +// output = (W*(b+d+f+h)+e)/(4*W+1) +// RCAS solves for 'W' by seeing where the signal might clip out of the {0 to 1} input range, +// 0 == (W*(b+d+f+h)+e)/(4*W+1) -> W = -e/(b+d+f+h) +// 1 == (W*(b+d+f+h)+e)/(4*W+1) -> W = (1-e)/(b+d+f+h-4) +// Then chooses the 'W' which results in no clipping, limits 'W', and multiplies by the 'sharp' amount. +// This solution above has issues with MSAA input as the steps along the gradient cause edge detection issues. +// So RCAS uses 4x the maximum and 4x the minimum (depending on equation)in place of the individual taps. +// As well as switching from 'e' to either the minimum or maximum (depending on side), to help in energy conservation. +// This stabilizes RCAS. +// RCAS does a simple highpass which is normalized against the local contrast then shaped, +// 0.25 +// 0.25 -1 0.25 +// 0.25 +// This is used as a noise detection filter, to reduce the effect of RCAS on grain, and focus on real edges. +// The CAS node runs after tonemapping, so the input will be in the range of 0 to 1. +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Algorithm uses minimal 3x3 pixel neighborhood. + // b + // d e f + // h + let b = texture_sample(screen_texture, samp, in.uv, vec2(0, -1)).rgb; + let d = texture_sample(screen_texture, samp, in.uv, vec2(-1, 0)).rgb; + // We need the alpha value of the pixel we're working on for the output + let e = texture_sample(screen_texture, samp, in.uv).rgbw; + let f = texture_sample(screen_texture, samp, in.uv, vec2(1, 0)).rgb; + let h = texture_sample(screen_texture, samp, in.uv, vec2(0, 1)).rgb; + // Min and max of ring. + let mn4 = min(min(b, d), min(f, h)); + let mx4 = max(max(b, d), max(f, h)); + // Limiters + // 4.0 to avoid issues with MSAA. + let hit_min = mn4 / (4.0 * mx4); + let hit_max = (peak_c.x - mx4) / (peak_c.y + 4.0 * mn4); + let lobe_rgb = max(-hit_min, hit_max); + var lobe = max(-FSR_RCAS_LIMIT, min(0.0, max(lobe_rgb.r, max(lobe_rgb.g, lobe_rgb.b)))) * uniforms.sharpness; +#ifdef RCAS_DENOISE + // Luma times 2. + let b_l = b.b * 0.5 + (b.r * 0.5 + b.g); + let d_l = d.b * 0.5 + (d.r * 0.5 + d.g); + let e_l = e.b * 0.5 + (e.r * 0.5 + e.g); + let f_l = f.b * 0.5 + (f.r * 0.5 + f.g); + let h_l = h.b * 0.5 + (h.r * 0.5 + h.g); + // Noise detection. 
+ var noise = 0.25 * b_l + 0.25 * d_l + 0.25 * f_l + 0.25 * h_l - e_l;; + noise = saturate(abs(noise) / (max(max(b_l, d_l), max(f_l, h_l)) - min(min(b_l, d_l), min(f_l, h_l)))); + noise = 1.0 - 0.5 * noise; + // Apply noise removal. + lobe *= noise; +#endif + return vec4((lobe * b + lobe * d + lobe * f + lobe * h + e.rgb) / (4.0 * lobe + 1.0), e.w); +} +#import bevy_render::view View + +@group(0) @binding(0) var skybox: texture_cube; +@group(0) @binding(1) var skybox_sampler: sampler; +@group(0) @binding(2) var view: View; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) world_position: vec3, +}; + +// 3 | 2. +// 2 | : `. +// 1 | x-----x. +// 0 | | s | `. +// -1 | 0-----x.....1 +// +--------------- +// -1 0 1 2 3 +// +// The axes are clip-space x and y. The region marked s is the visible region. +// The digits in the corners of the right-angled triangle are the vertex +// indices. +@vertex +fn skybox_vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { + // See the explanation above for how this works. + let clip_position = vec4( + f32(vertex_index & 1u), + f32((vertex_index >> 1u) & 1u), + 0.25, + 0.5 + ) * 4.0 - vec4(1.0); + // Use the position on the near clipping plane to avoid -inf world position + // because the far plane of an infinite reverse projection is at infinity. + // NOTE: The clip position has a w component equal to 1.0 so we don't need + // to apply a perspective divide to it before inverse-projecting it. + let world_position_homogeneous = view.inverse_view_proj * vec4(clip_position.xy, 1.0, 1.0); + let world_position = world_position_homogeneous.xyz / world_position_homogeneous.w; + + return VertexOutput(clip_position, world_position); +} + +@fragment +fn skybox_fragment(in: VertexOutput) -> @location(0) vec4 { + // The skybox cubemap is sampled along the direction from the camera world + // position, to the fragment world position on the near clipping plane + let ray_direction = in.world_position - view.world_position; + // cube maps are left-handed so we negate the z coordinate + return texture_sample(skybox, skybox_sampler, ray_direction * vec3(1.0, 1.0, -1.0)); +} +// Bloom works by creating an intermediate texture with a bunch of mip levels, each half the size of the previous. +// You then downsample each mip (starting with the original texture) to the lower resolution mip under it, going in order. +// You then upsample each mip (starting from the smallest mip) and blend with the higher resolution mip above it (ending on the original texture). 
+// +// References: +// * [COD] - Next Generation Post Processing in Call of Duty - http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare +// * [PBB] - Physically Based Bloom - https://learnopengl.com/Guest-Articles/2022/Phys.-Based-Bloom + +#import bevy_core_pipeline::fullscreen_vertex_shader + +struct BloomUniforms { + threshold_precomputations: vec4, + viewport: vec4, + aspect: f32, +}; + +@group(0) @binding(0) var input_texture: texture_2d; +@group(0) @binding(1) var s: sampler; + +@group(0) @binding(2) var uniforms: BloomUniforms; + +#ifdef FIRST_DOWNSAMPLE +// https://catlikecoding.com/unity/tutorials/advanced-rendering/bloom/#3.4 +fn soft_threshold(color: vec3) -> vec3 { + let brightness = max(color.r, max(color.g, color.b)); + var softness = brightness - uniforms.threshold_precomputations.y; + softness = clamp(softness, 0.0, uniforms.threshold_precomputations.z); + softness = softness * softness * uniforms.threshold_precomputations.w; + var contribution = max(brightness - uniforms.threshold_precomputations.x, softness); + contribution /= max(brightness, 0.00001); // Prevent division by 0 + return color * contribution; +} +#endif + +// luminance coefficients from Rec. 709. +// https://en.wikipedia.org/wiki/Rec._709 +fn tonemapping_luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} + +fn rgb_to_srgb_simple(color: vec3) -> vec3 { + return pow(color, vec3(1.0 / 2.2)); +} + +// http://graphicrants.blogspot.com/2013/12/tone-mapping.html +fn karis_average(color: vec3) -> f32 { + // Luminance calculated by gamma-correcting linear RGB to non-linear sRGB using pow(color, 1.0 / 2.2) + // and then calculating luminance based on Rec. 709 color primaries. + let luma = tonemapping_luminance(rgb_to_srgb_simple(color)) / 4.0; + return 1.0 / (1.0 + luma); +} + +// [COD] slide 153 +fn sample_input_13_tap(uv: vec2) -> vec3 { + let a = texture_sample(input_texture, s, uv, vec2(-2, 2)).rgb; + let b = texture_sample(input_texture, s, uv, vec2(0, 2)).rgb; + let c = texture_sample(input_texture, s, uv, vec2(2, 2)).rgb; + let d = texture_sample(input_texture, s, uv, vec2(-2, 0)).rgb; + let e = texture_sample(input_texture, s, uv).rgb; + let f = texture_sample(input_texture, s, uv, vec2(2, 0)).rgb; + let g = texture_sample(input_texture, s, uv, vec2(-2, -2)).rgb; + let h = texture_sample(input_texture, s, uv, vec2(0, -2)).rgb; + let i = texture_sample(input_texture, s, uv, vec2(2, -2)).rgb; + let j = texture_sample(input_texture, s, uv, vec2(-1, 1)).rgb; + let k = texture_sample(input_texture, s, uv, vec2(1, 1)).rgb; + let l = texture_sample(input_texture, s, uv, vec2(-1, -1)).rgb; + let m = texture_sample(input_texture, s, uv, vec2(1, -1)).rgb; + +#ifdef FIRST_DOWNSAMPLE + // [COD] slide 168 + // + // The first downsample pass reads from the rendered frame which may exhibit + // 'fireflies' (individual very bright pixels) that should not cause the bloom effect. + // + // The first downsample uses a firefly-reduction method proposed by Brian Karis + // which takes a weighted-average of the samples to limit their luma range to [0, 1]. + // This implementation matches the LearnOpenGL article [PBB]. 
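// Editor's note: a standalone sketch, not part of the patch. It factors out
// the per-group firefly reduction that the code below performs inline: each of
// the five overlapping 2x2 box groups (weights 4 x 0.125 + 0.5 = 1.0 after the
// divide by four) is additionally scaled by `karis_average`, so a group that
// contains one extremely bright sample is attenuated instead of dominating the
// whole mip chain. `group_sum` is the sum of a group's four taps and
// `group_weight` is 0.125 / 4.0 (corner groups) or 0.5 / 4.0 (centre group).
fn karis_weighted_group(group_sum: vec3<f32>, group_weight: f32) -> vec3<f32> {
    let group = group_sum * group_weight;
    return group * karis_average(group);
}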
+ var group0 = (a + b + d + e) * (0.125f / 4.0f); + var group1 = (b + c + e + f) * (0.125f / 4.0f); + var group2 = (d + e + g + h) * (0.125f / 4.0f); + var group3 = (e + f + h + i) * (0.125f / 4.0f); + var group4 = (j + k + l + m) * (0.5f / 4.0f); + group0 *= karis_average(group0); + group1 *= karis_average(group1); + group2 *= karis_average(group2); + group3 *= karis_average(group3); + group4 *= karis_average(group4); + return group0 + group1 + group2 + group3 + group4; +#else + var sample = (a + c + g + i) * 0.03125; + sample += (b + d + f + h) * 0.0625; + sample += (e + j + k + l + m) * 0.125; + return sample; +#endif +} + +// [COD] slide 162 +fn sample_input_3x3_tent(uv: vec2) -> vec3 { + // Radius. Empirically chosen by and tweaked from the LearnOpenGL article. + let x = 0.004 / uniforms.aspect; + let y = 0.004; + + let a = texture_sample(input_texture, s, vec2(uv.x - x, uv.y + y)).rgb; + let b = texture_sample(input_texture, s, vec2(uv.x, uv.y + y)).rgb; + let c = texture_sample(input_texture, s, vec2(uv.x + x, uv.y + y)).rgb; + + let d = texture_sample(input_texture, s, vec2(uv.x - x, uv.y)).rgb; + let e = texture_sample(input_texture, s, vec2(uv.x, uv.y)).rgb; + let f = texture_sample(input_texture, s, vec2(uv.x + x, uv.y)).rgb; + + let g = texture_sample(input_texture, s, vec2(uv.x - x, uv.y - y)).rgb; + let h = texture_sample(input_texture, s, vec2(uv.x, uv.y - y)).rgb; + let i = texture_sample(input_texture, s, vec2(uv.x + x, uv.y - y)).rgb; + + var sample = e * 0.25; + sample += (b + d + f + h) * 0.125; + sample += (a + c + g + i) * 0.0625; + + return sample; +} + +#ifdef FIRST_DOWNSAMPLE +@fragment +fn downsample_first(@location(0) output_uv: vec2) -> @location(0) vec4 { + let sample_uv = uniforms.viewport.xy + output_uv * uniforms.viewport.zw; + var sample = sample_input_13_tap(sample_uv); + // Lower bound of 0.0001 is to avoid propagating multiplying by 0.0 through the + // downscaling and upscaling which would result in black boxes. + // The upper bound is to prevent NaNs. + // with f32::MAX (E+38) Chrome fails with ":value 340282346999999984391321947108527833088.0 cannot be represented as 'f32'" + sample = clamp(sample, vec3(0.0001), vec3(3.40282347E+37)); + +#ifdef USE_THRESHOLD + sample = soft_threshold(sample); +#endif + + return vec4(sample, 1.0); +} +#endif + +@fragment +fn downsample(@location(0) uv: vec2) -> @location(0) vec4 { + return vec4(sample_input_13_tap(uv), 1.0); +} + +@fragment +fn upsample(@location(0) uv: vec2) -> @location(0) vec4 { + return vec4(sample_input_3x3_tent(uv), 1.0); +} +// NVIDIA FXAA 3.11 +// Original source code by TIMOTHY LOTTES +// https://gist.github.com/kosua20/0c506b81b3812ac900048059d2383126 +// +// Cleaned version - https://github.com/kosua20/Rendu/blob/master/resources/common/shaders/screens/fxaa.frag +// +// Tweaks by mrDIMAS - https://github.com/FyroxEngine/Fyrox/blob/master/src/renderer/shaders/fxaa_fs.glsl + +#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput + +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var samp: sampler; + +// Trims the algorithm from processing darks. 
+#ifdef EDGE_THRESH_MIN_LOW + const EDGE_THRESHOLD_MIN: f32 = 0.0833; +#endif + +#ifdef EDGE_THRESH_MIN_MEDIUM + const EDGE_THRESHOLD_MIN: f32 = 0.0625; +#endif + +#ifdef EDGE_THRESH_MIN_HIGH + const EDGE_THRESHOLD_MIN: f32 = 0.0312; +#endif + +#ifdef EDGE_THRESH_MIN_ULTRA + const EDGE_THRESHOLD_MIN: f32 = 0.0156; +#endif + +#ifdef EDGE_THRESH_MIN_EXTREME + const EDGE_THRESHOLD_MIN: f32 = 0.0078; +#endif + +// The minimum amount of local contrast required to apply algorithm. +#ifdef EDGE_THRESH_LOW + const EDGE_THRESHOLD_MAX: f32 = 0.250; +#endif + +#ifdef EDGE_THRESH_MEDIUM + const EDGE_THRESHOLD_MAX: f32 = 0.166; +#endif + +#ifdef EDGE_THRESH_HIGH + const EDGE_THRESHOLD_MAX: f32 = 0.125; +#endif + +#ifdef EDGE_THRESH_ULTRA + const EDGE_THRESHOLD_MAX: f32 = 0.063; +#endif + +#ifdef EDGE_THRESH_EXTREME + const EDGE_THRESHOLD_MAX: f32 = 0.031; +#endif + +const ITERATIONS: i32 = 12; //default is 12 +const SUBPIXEL_QUALITY: f32 = 0.75; +// #define QUALITY(q) ((q) < 5 ? 1.0 : ((q) > 5 ? ((q) < 10 ? 2.0 : ((q) < 11 ? 4.0 : 8.0)) : 1.5)) +fn QUALITY(q: i32) -> f32 { + switch (q) { + //case 0, 1, 2, 3, 4: { return 1.0; } + default: { return 1.0; } + case 5: { return 1.5; } + case 6, 7, 8, 9: { return 2.0; } + case 10: { return 4.0; } + case 11: { return 8.0; } + } +} + +fn rgb2luma(rgb: vec3) -> f32 { + return sqrt(dot(rgb, vec3(0.299, 0.587, 0.114))); +} + +// Performs FXAA post-process anti-aliasing as described in the Nvidia FXAA white paper and the associated shader code. +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + let resolution = vec2(texture_dimensions(screen_texture)); + let inverse_screen_size = 1.0 / resolution.xy; + let tex_coord = in.position.xy * inverse_screen_size; + + let center_sample = texture_sample_level(screen_texture, samp, tex_coord, 0.0); + let color_center = center_sample.rgb; + + // Luma at the current fragment + let luma_center = rgb2luma(color_center); + + // Luma at the four direct neighbors of the current fragment. + let luma_down = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(0, -1)).rgb); + let luma_up = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(0, 1)).rgb); + let luma_left = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(-1, 0)).rgb); + let luma_right = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(1, 0)).rgb); + + // Find the maximum and minimum luma around the current fragment. + let luma_min = min(luma_center, min(min(luma_down, luma_up), min(luma_left, luma_right))); + let luma_max = max(luma_center, max(max(luma_down, luma_up), max(luma_left, luma_right))); + + // Compute the delta. + let luma_range = luma_max - luma_min; + + // If the luma variation is lower that a threshold (or if we are in a really dark area), we are not on an edge, don't perform any AA. + if (luma_range < max(EDGE_THRESHOLD_MIN, luma_max * EDGE_THRESHOLD_MAX)) { + return center_sample; + } + + // Query the 4 remaining corners lumas. 
+ let luma_down_left = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(-1, -1)).rgb); + let luma_up_right = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(1, 1)).rgb); + let luma_up_left = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(-1, 1)).rgb); + let luma_down_right = rgb2luma(texture_sample_level(screen_texture, samp, tex_coord, 0.0, vec2(1, -1)).rgb); + + // Combine the four edges lumas (using intermediary variables for future computations with the same values). + let luma_down_up = luma_down + luma_up; + let luma_leftRight = luma_left + luma_right; + + // Same for corners + let luma_left_corners = luma_down_left + luma_up_left; + let luma_down_corners = luma_down_left + luma_down_right; + let luma_right_corners = luma_down_right + luma_up_right; + let luma_up_corners = luma_up_right + luma_up_left; + + // Compute an estimation of the gradient along the horizontal and vertical axis. + let edge_horizontal = abs(-2.0 * luma_left + luma_left_corners) + + abs(-2.0 * luma_center + luma_down_up) * 2.0 + + abs(-2.0 * luma_right + luma_rightCorners); + + let edge_vertical = abs(-2.0 * luma_up + luma_up_corners) + + abs(-2.0 * luma_center + luma_left_right) * 2.0 + + abs(-2.0 * luma_down + luma_downCorners); + + // Is the local edge horizontal or vertical ? + let is_horizontal = (edge_horizontal >= edge_vertical); + + // Choose the step size (one pixel) accordingly. + var step_length = select(inverse_screen_size.x, inverse_screen_size.y, is_horizontal); + + // Select the two neighboring texels lumas in the opposite direction to the local edge. + var luma1 = select(luma_left, luma_down, is_horizontal); + var luma2 = select(luma_right, luma_up, is_horizontal); + + // Compute gradients in this direction. + let gradient1 = luma1 - luma_center; + let gradient2 = luma2 - luma_center; + + // Which direction is the steepest ? + let is1Steepest = abs(gradient1) >= abs(gradient2); + + // Gradient in the corresponding direction, normalized. + let gradient_scaled = 0.25 * max(abs(gradient1), abs(gradient2)); + + // Average luma in the correct direction. + var luma_local_average = 0.0; + if (is1Steepest) { + // Switch the direction + step_length = -step_length; + luma_local_average = 0.5 * (luma1 + luma_center); + } else { + luma_local_average = 0.5 * (luma2 + luma_center); + } + + // Shift UV in the correct direction by half a pixel. + // Compute offset (for each iteration step) in the right direction. + var current_uv = tex_coord; + var offset = vec2(0.0, 0.0); + if (is_horizontal) { + current_uv.y = current_uv.y + step_length * 0.5; + offset.x = inverse_screen_size.x; + } else { + current_uv.x = current_uv.x + step_length * 0.5; + offset.y = inverse_screen_size.y; + } + + // Compute UVs to explore on each side of the edge, orthogonally. The QUALITY allows us to step faster. + var uv1 = current_uv - offset; // * QUALITY(0); // (quality 0 is 1.0) + var uv2 = current_uv + offset; // * QUALITY(0); // (quality 0 is 1.0) + + // Read the lumas at both current extremities of the exploration segment, and compute the delta wrt to the local average luma. + var luma_end1 = rgb2luma(texture_sample_level(screen_texture, samp, uv1, 0.0).rgb); + var luma_end2 = rgb2luma(texture_sample_level(screen_texture, samp, uv2, 0.0).rgb); + luma_end1 = luma_end1 - luma_local_average; + luma_end2 = luma_end2 - luma_local_average; + + // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. 
+ var reached1 = abs(luma_end1) >= gradient_scaled; + var reached2 = abs(luma_end2) >= gradient_scaled; + var reached_both = reached1 && reached2; + + // If the side is not reached, we continue to explore in this direction. + uv1 = select(uv1 - offset, uv1, reached1); // * QUALITY(1); // (quality 1 is 1.0) + uv2 = select(uv2 - offset, uv2, reached2); // * QUALITY(1); // (quality 1 is 1.0) + + // If both sides have not been reached, continue to explore. + if (!reached_both) { + for (var i: i32 = 2; i < ITERATIONS; i = i + 1) { + // If needed, read luma in 1st direction, compute delta. + if (!reached1) { + luma_end1 = rgb2luma(texture_sample_level(screen_texture, samp, uv1, 0.0).rgb); + luma_end1 = luma_end1 - luma_local_average; + } + // If needed, read luma in opposite direction, compute delta. + if (!reached2) { + luma_end2 = rgb2luma(texture_sample_level(screen_texture, samp, uv2, 0.0).rgb); + luma_end2 = luma_end2 - luma_local_average; + } + // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. + reached1 = abs(luma_end1) >= gradient_scaled; + reached2 = abs(luma_end2) >= gradient_scaled; + reached_both = reached1 && reached2; + + // If the side is not reached, we continue to explore in this direction, with a variable quality. + if (!reached1) { + uv1 = uv1 - offset * QUALITY(i); + } + if (!reached2) { + uv2 = uv2 + offset * QUALITY(i); + } + + // If both sides have been reached, stop the exploration. + if (reached_both) { + break; + } + } + } + + // Compute the distances to each side edge of the edge (!). + var distance1 = select(tex_coord.y - uv1.y, tex_coord.x - uv1.x, is_horizontal); + var distance2 = select(uv2.y - tex_coord.y, uv2.x - tex_coord.x, is_horizontal); + + // In which direction is the side of the edge closer ? + let is_direction1 = distance1 < distance2; + let distance_final = min(distance1, distance2); + + // Thickness of the edge. + let edge_thickness = (distance1 + distance2); + + // Is the luma at center smaller than the local average ? + let is_luma_center_smaller = luma_center < luma_local_average; + + // If the luma at center is smaller than at its neighbor, the delta luma at each end should be positive (same variation). + let correct_variation1 = (luma_end1 < 0.0) != is_luma_center_smaller; + let correct_variation2 = (luma_end2 < 0.0) != is_luma_center_smaller; + + // Only keep the result in the direction of the closer side of the edge. + var correct_variation = select(correct_variation2, correct_variation1, is_direction1); + + // UV offset: read in the direction of the closest side of the edge. + let pixel_offset = - distance_final / edge_thickness + 0.5; + + // If the luma variation is incorrect, do not offset. + var final_offset = select(0.0, pixel_offset, correct_variation); + + // Sub-pixel shifting + // Full weighted average of the luma over the 3x3 neighborhood. + let luma_average = (1.0 / 12.0) * (2.0 * (luma_down_up + luma_left_right) + luma_left_corners + luma_right_corners); + // Ratio of the delta between the global average and the center luma, over the luma range in the 3x3 neighborhood. + let sub_pixel_offset1 = clamp(abs(luma_average - luma_center) / luma_range, 0.0, 1.0); + let sub_pixel_offset2 = (-2.0 * sub_pixel_offset1 + 3.0) * sub_pixel_offset1 * sub_pixel_offset1; + // Compute a sub-pixel offset based on this delta. + let sub_pixel_offset_final = sub_pixel_offset2 * sub_pixel_offset2 * SUBPIXEL_QUALITY; + + // Pick the biggest of the two offsets. 
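// Editor's note, not part of the patch: the sub-pixel polynomial computed just
// above, (-2.0 * x + 3.0) * x * x with x already clamped to [0, 1], is the
// standard smoothstep cubic x * x * (3.0 - 2.0 * x), so the same value could
// be obtained with the WGSL built-in:
//   let sub_pixel_offset2 = smoothstep(0.0, 1.0, sub_pixel_offset1);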
+ final_offset = max(final_offset, sub_pixel_offset_final); + + // Compute the final UV coordinates. + var final_uv = tex_coord; + if (is_horizontal) { + final_uv.y = final_uv.y + final_offset * step_length; + } else { + final_uv.x = final_uv.x + final_offset * step_length; + } + + // Read the color at the new UV coordinates, and use it. + var final_color = texture_sample_level(screen_texture, samp, final_uv, 0.0).rgb; + return vec4(final_color, center_sample.a); +} +#define TONEMAPPING_PASS + +#import bevy_core_pipeline::fullscreen_vertex_shader FullscreenVertexOutput +#import bevy_render::view View +#import bevy_core_pipeline::tonemapping tone_mapping, powsafe, screen_space_dither + +@group(0) @binding(0) var view: View; + +@group(0) @binding(1) var hdr_texture: texture_2d; +@group(0) @binding(2) var hdr_sampler: sampler; +@group(0) @binding(3) var dt_lut_texture: texture_3d; +@group(0) @binding(4) var dt_lut_sampler: sampler; + +#import bevy_core_pipeline::tonemapping + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + let hdr_color = texture_sample(hdr_texture, hdr_sampler, in.uv); + + var output_rgb = tone_mapping(hdr_color, view.color_grading).rgb; + +#ifdef DEBAND_DITHER + output_rgb = powsafe(output_rgb.rgb, 1.0 / 2.2); + output_rgb = output_rgb + bevy_core_pipeline::tonemapping::screen_space_dither(in.position.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. + output_rgb = powsafe(output_rgb.rgb, 2.2); +#endif + + return vec4(output_rgb, hdr_color.a); +} +#define_import_path bevy_core_pipeline::tonemapping + +#import bevy_render::view View, ColorGrading + +// hack !! not sure what to do with this +#ifdef TONEMAPPING_PASS + @group(0) @binding(3) var dt_lut_texture: texture_3d; + @group(0) @binding(4) var dt_lut_sampler: sampler; +#else + @group(0) @binding(15) var dt_lut_texture: texture_3d; + @group(0) @binding(16) var dt_lut_sampler: sampler; +#endif + +fn sample_current_lut(p: vec3) -> vec3 { + // Don't include code that will try to sample from LUTs if tonemap method doesn't require it + // Allows this file to be imported without necessarily needing the lut texture bindings +#ifdef TONEMAP_METHOD_AGX + return texture_sample_level(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE + return texture_sample_level(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else ifdef TONEMAP_METHOD_BLENDER_FILMIC + return texture_sample_level(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else + return vec3(1.0, 0.0, 1.0); + #endif +} + +// -------------------------------------- +// --- SomewhatBoringDisplayTransform --- +// -------------------------------------- +// By Tomasz Stachowiak + +fn rgb_to_ycbcr(col: vec3) -> vec3 { + let m = mat3x3( + 0.2126, 0.7152, 0.0722, + -0.1146, -0.3854, 0.5, + 0.5, -0.4542, -0.0458 + ); + return col * m; +} + +fn ycbcr_to_rgb(col: vec3) -> vec3 { + let m = mat3x3( + 1.0, 0.0, 1.5748, + 1.0, -0.1873, -0.4681, + 1.0, 1.8556, 0.0 + ); + return max(vec3(0.0), col * m); +} + +fn tonemap_curve(v: f32) -> f32 { +#ifdef 0 + // Large linear part in the lows, but compresses highs. 
+ float c = v + v * v + 0.5 * v * v * v; + return c / (1.0 + c); +#else + return 1.0 - exp(-v); +#endif +} + +fn tonemap_curve3_(v: vec3) -> vec3 { + return vec3(tonemap_curve(v.r), tonemap_curve(v.g), tonemap_curve(v.b)); +} + +fn somewhat_boring_display_transform(col: vec3) -> vec3 { + var boring_color = col; + let ycbcr = rgb_to_ycbcr(boring_color); + + let bt = tonemap_curve(length(ycbcr.yz) * 2.4); + var desat = max((bt - 0.7) * 0.8, 0.0); + desat *= desat; + + let desat_col = mix(boring_color.rgb, ycbcr.xxx, desat); + + let tm_luma = tonemap_curve(ycbcr.x); + let tm0 = boring_color.rgb * max(0.0, tm_luma / max(1e-5, tonemapping_luminance(boring_color.rgb))); + let final_mult = 0.97; + let tm1 = tonemap_curve3_(desat_col); + + boring_color = mix(tm0, tm1, bt * bt); + + return boring_color * final_mult; +} + +// ------------------------------------------ +// ------------- Tony McMapface ------------- +// ------------------------------------------ +// By Tomasz Stachowiak +// https://github.com/h3r2tic/tony-mc-mapface + +const TONY_MC_MAPFACE_LUT_DIMS: f32 = 48.0; + +fn sample_tony_mc_mapface_lut(stimulus: vec3) -> vec3 { + var uv = (stimulus / (stimulus + 1.0)) * (f32(TONY_MC_MAPFACE_LUT_DIMS - 1.0) / f32(TONY_MC_MAPFACE_LUT_DIMS)) + 0.5 / f32(TONY_MC_MAPFACE_LUT_DIMS); + return sample_current_lut(saturate(uv)).rgb; +} + +// --------------------------------- +// ---------- ACES Fitted ---------- +// --------------------------------- + +// Same base implementation that Godot 4.0 uses for Tonemap ACES. + +// https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl + +// The code in this file was originally written by Stephen Hill (@self_shadow), who deserves all +// credit for coming up with this fit and implementing it. Buy him a beer next time you see him. :) + +fn RRTAndODTFit(v: vec3) -> vec3 { + let a = v * (v + 0.0245786) - 0.000090537; + let b = v * (0.983729 * v + 0.4329510) + 0.238081; + return a / b; +} + +fn ACESFitted(color: vec3) -> vec3 { + var fitted_color = color; + + // sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT + let rgb_to_rrt = mat3x3( + vec3(0.59719, 0.35458, 0.04823), + vec3(0.07600, 0.90834, 0.01566), + vec3(0.02840, 0.13383, 0.83777) + ); + + // ODT_SAT => XYZ => D60_2_D65 => sRGB + let odt_to_rgb = mat3x3( + vec3(1.60475, -0.53108, -0.07367), + vec3(-0.10208, 1.10813, -0.00605), + vec3(-0.00327, -0.07276, 1.07602) + ); + + fitted_color *= rgb_to_rrt; + + // Apply RRT and ODT + fitted_color = RRTAndODTFit(fitted_color); + + fitted_color *= odt_to_rgb; + + // Clamp to [0, 1] + fitted_color = saturate(fitted_color); + + return fitted_color; +} + +// ------------------------------- +// ------------- AgX ------------- +// ------------------------------- +// By Troy Sobotka +// https://github.com/MrLixm/AgXc +// https://github.com/sobotka/AgX + +// pow() but safe for NaNs/negatives +fn powsafe(color: vec3, power: f32) -> vec3 { + return pow(abs(color), vec3(power)) * sign(color); +} + +/* + Increase color saturation of the given color data. + :param color: expected s_rgb primaries input + :param saturation_amount: expected 0-1 range with 1=neutral, 0=no saturation. + -- ref[2] [4] +*/ +fn saturation(color: vec3, saturation_amount: f32) -> vec3 { + let luma = tonemapping_luminance(color); + return mix(vec3(luma), color, vec3(saturation_amount)); +} + +/* + Output log domain encoded data. + Similar to OCIO lg2 AllocationTransform. 
+ ref[0] +*/ +fn convertOpenDomainToNormalizedLog2_(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { + let in_midgray = 0.18; + + // remove negative before log transform + var normalized_color = max(vec3(0.0), color); + // avoid infinite issue with log -- ref[1] + normalized_color = select(normalized_color, 0.00001525878 + normalized_color, normalized_color < vec3(0.00003051757)); + normalized_color = clamp( + log2(normalized_color / in_midgray), + vec3(minimum_ev), + vec3(maximum_ev) + ); + let total_exposure = maximum_ev - minimum_ev; + + return (normalized_color - minimum_ev) / total_exposure; +} + +// Inverse of above +fn convert_normalized_log2_to_open_domain(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { + var open_color = color; + let in_midgray = 0.18; + let total_exposure = maximum_ev - minimum_ev; + + open_color = (open_color * total_exposure) + minimum_ev; + open_color = pow(vec3(2.0), open_color); + open_color = open_color * in_midgray; + + return open_color; +} + + +/*================= + Main processes +=================*/ + +// Prepare the data for display encoding. Converted to log domain. +fn apply_ag_x_log(Image: vec3) -> vec3 { + var prepared_image = max(vec3(0.0), Image); // clamp negatives + let r = dot(prepared_image, vec3(0.84247906, 0.0784336, 0.07922375)); + let g = dot(prepared_image, vec3(0.04232824, 0.87846864, 0.07916613)); + let b = dot(prepared_image, vec3(0.04237565, 0.0784336, 0.87914297)); + prepared_image = vec3(r, g, b); + + prepared_image = convertOpenDomainToNormalizedLog2_(prepared_image, -10.0, 6.5); + + prepared_image = clamp(prepared_image, vec3(0.0), vec3(1.0)); + return prepared_image; +} + +fn apply_lut3_d(Image: vec3, block_size: f32) -> vec3 { + return sample_current_lut(Image * ((block_size - 1.0) / block_size) + 0.5 / block_size).rgb; +} + +// ------------------------- +// ------------------------- +// ------------------------- + +fn sample_blender_filmic_lut(stimulus: vec3) -> vec3 { + let block_size = 64.0; + let normalized = saturate(convertOpenDomainToNormalizedLog2_(stimulus, -11.0, 12.0)); + return apply_lut3_d(normalized, block_size); +} + +// from https://64.github.io/tonemapping/ +// reinhard on RGB oversaturates colors +fn tonemapping_reinhard(color: vec3) -> vec3 { + return color / (1.0 + color); +} + +fn tonemapping_reinhard_extended(color: vec3, max_white: f32) -> vec3 { + let numerator = color * (1.0 + (color / vec3(max_white * max_white))); + return numerator / (1.0 + color); +} + +// luminance coefficients from Rec. 709. 
+// https://en.wikipedia.org/wiki/Rec._709 +fn tonemapping_luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} + +fn tonemapping_change_luminance(c_in: vec3, l_out: f32) -> vec3 { + let l_in = tonemapping_luminance(c_in); + return c_in * (l_out / l_in); +} + +fn tonemapping_reinhard_luminance(color: vec3) -> vec3 { + let l_old = tonemapping_luminance(color); + let l_new = l_old / (1.0 + l_old); + return tonemapping_change_luminance(color, l_new); +} + +fn rgb_to_srgb_simple(color: vec3) -> vec3 { + return pow(color, vec3(1.0 / 2.2)); +} + +// Source: Advanced VR Rendering, GDC 2015, Alex Vlachos, Valve, Slide 49 +// https://media.steampowered.com/apps/valve/2015/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf +fn screen_space_dither(frag_coord: vec2) -> vec3 { + var dither = vec3(dot(vec2(171.0, 231.0), frag_coord)).xxx; + dither = fract(dither.rgb / vec3(103.0, 71.0, 97.0)); + return (dither - 0.5) / 255.0; +} + +fn tone_mapping(in: vec4, color_grading: ColorGrading) -> vec4 { + var color = max(in.rgb, vec3(0.0)); + + // Possible future grading: + + // highlight gain gamma: 0.. + // let luma = powsafe(vec3(tonemapping_luminance(color)), 1.0); + + // highlight gain: 0.. + // color += color * luma.xxx * 1.0; + + // Linear pre tonemapping grading + color = saturation(color, color_grading.pre_saturation); + color = powsafe(color, color_grading.gamma); + color = color * powsafe(vec3(2.0), color_grading.exposure); + color = max(color, vec3(0.0)); + + // tone_mapping +#ifdef TONEMAP_METHOD_NONE + color = color; +#else ifdef TONEMAP_METHOD_REINHARD + color = tonemapping_reinhard(color.rgb); +#else ifdef TONEMAP_METHOD_REINHARD_LUMINANCE + color = tonemapping_reinhard_luminance(color.rgb); +#else ifdef TONEMAP_METHOD_ACES_FITTED + color = ACESFitted(color.rgb); +#else ifdef TONEMAP_METHOD_AGX + color = apply_ag_x_log(color); + color = apply_lut3_d(color, 32.0); +#else ifdef TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM + color = somewhat_boring_display_transform(color.rgb); +#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE + color = sample_tony_mc_mapface_lut(color); +#else ifdef TONEMAP_METHOD_BLENDER_FILMIC + color = sample_blender_filmic_lut(color.rgb); +#endif + + // Perceptual post tonemapping grading + color = saturation(color, color_grading.post_saturation); + + return vec4(color, in.a); +} + +#define_import_path bevy_sprite::mesh2d_bindings + +#import bevy_sprite::mesh2d_types + +@group(2) @binding(0) var mesh: bevy_sprite::mesh2d_types::Mesh2d; +#define_import_path bevy_sprite::mesh2d_view_bindings + +#import bevy_render::view View +#import bevy_render::globals Globals + +@group(0) @binding(0) var view: View; + +@group(0) @binding(1) var globals: Globals; +#import bevy_sprite::mesh2d_types Mesh2d +#import bevy_sprite::mesh2d_vertex_output VertexOutput +#import bevy_sprite::mesh2d_view_bindings view + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif + +struct ColorMaterial { + color: vec4, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
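+    // A flag is tested by masking, e.g. `(material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u` below.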
+ flags: u32, +}; +const COLOR_MATERIAL_FLAGS_TEXTURE_BIT: u32 = 1u; + +@group(1) @binding(0) var material: ColorMaterial; +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + var output_color: vec4 = material.color; +#ifdef VERTEX_COLORS + output_color = output_color * mesh.color; +#endif + if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { + output_color = output_color * texture_sample(texture, texture_sampler, mesh.uv); + } +#ifdef TONEMAP_IN_SHADER + output_color = bevy_core_pipeline::tonemapping::tone_mapping(output_color, view.color_grading); +#endif + return output_color; +} +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif + +#import bevy_render::maths affine_to_square +#import bevy_render::view View + +@group(0) @binding(0) var view: View; + +struct VertexInput { + @builtin(vertex_index) index: u32, + // NOTE: Instance-rate vertex buffer members prefixed with i_ + // NOTE: i_model_transpose_colN are the 3 columns of a 3x4 matrix that is the transpose of the + // affine 4x3 model matrix. + @location(0) i_model_transpose_col0: vec4, + @location(1) i_model_transpose_col1: vec4, + @location(2) i_model_transpose_col2: vec4, + @location(3) i_color: vec4, + @location(4) i_uv_offset_scale: vec4, +} + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) uv: vec2, + @location(1) @interpolate(flat) color: vec4, +}; + +@vertex +fn vertex(in: VertexInput) -> VertexOutput { + var out: VertexOutput; + + let vertex_position = vec3( + f32(in.index & 0x1u), + f32((in.index & 0x2u) >> 1u), + 0.0 + ); + + out.clip_position = view.view_proj * affine_to_square(mat3x4( + in.i_model_transpose_col0, + in.i_model_transpose_col1, + in.i_model_transpose_col2, + )) * vec4(vertex_position, 1.0); + out.uv = vec2(vertex_position.xy) * in.i_uv_offset_scale.zw + in.i_uv_offset_scale.xy; + out.color = in.i_color; + + return out; +} + +@group(1) @binding(0) var sprite_texture: texture_2d; +@group(1) @binding(1) var sprite_sampler: sampler; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var color = in.color * texture_sample(sprite_texture, sprite_sampler, in.uv); + +#ifdef TONEMAP_IN_SHADER + color = bevy_core_pipeline::tonemapping::tone_mapping(color, view.color_grading); +#endif + + return color; +} +// TODO use common view binding +#import bevy_render::view View + +@group(0) @binding(0) var view: View; + + +struct LineGizmoUniform { + line_width: f32, + depth_bias: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _padding: vec2, +#endif +} + +@group(1) @binding(0) var line_gizmo: LineGizmoUniform; + +struct VertexInput { + @location(0) position_a: vec3, + @location(1) position_b: vec3, + @location(2) color_a: vec4, + @location(3) color_b: vec4, + @builtin(vertex_index) index: u32, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, +}; + +@vertex +fn vertex(vertex: VertexInput) -> VertexOutput { + var positions = array, 6>( + vec3(0., -0.5, 0.), + vec3(0., -0.5, 1.), + vec3(0., 0.5, 1.), + vec3(0., -0.5, 0.), + vec3(0., 0.5, 1.), + vec3(0., 0.5, 0.) 
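+        // y is the offset across the line (-0.5 or 0.5) and z picks the endpoint (0 = a, 1 = b);
+        // the six entries are the two triangles of a single line quad.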
+ ); + let position = positions[vertex.index]; + + // algorithm based on https://wwwtyro.net/2019/11/18/instanced-lines.html + var clip_a = view.view_proj * vec4(vertex.position_a, 1.); + var clip_b = view.view_proj * vec4(vertex.position_b, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_b); + clip_b = clip_near_plane(clip_b, clip_a); + + let clip = mix(clip_a, clip_b, position.z); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + + let x_basis = normalize(screen_a - screen_b); + let y_basis = vec2(-x_basis.y, x_basis.x); + + var color = mix(vertex.color_a, vertex.color_b, position.z); + + var line_width = line_gizmo.line_width; + var alpha = 1.; + +#ifdef PERSPECTIVE + line_width /= clip.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let offset = line_width * (position.x * x_basis + position.y * y_basis); + let screen = mix(screen_a, screen_b, position.z) + offset; + + var depth: f32; + if line_gizmo.depth_bias >= 0. { + depth = clip.z * (1. - line_gizmo.depth_bias); + } else { + let epsilon = 4.88e-04; + // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w + // and when equal to 0.0, it is exactly equal to depth. + // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 + // clip.w represents the near plane in homogeneous clip space in bevy, having a depth + // of this value means nothing can be in front of this + // The reason this uses an exponential function is that it makes it much easier for the + // user to chose a value that is convenient for them + depth = clip.z * exp2(-line_gizmo.depth_bias * log2(clip.w / clip.z - epsilon)); + } + + var clip_position = vec4(clip.w * ((2. * screen) / resolution - 1.), depth, clip.w); + + return VertexOutput(clip_position, color); +} + +fn clip_near_plane(a: vec4, b: vec4) -> vec4 { + // Move a if a is behind the near plane and b is in front. + if a.z > a.w && b.z <= b.w { + // Interpolate a towards b until it's at the near plane. 
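+        // As noted for depth_bias above, clip.w is the near plane, so z - w is a signed
+        // distance past it and t below is the fraction along a -> b where that distance is zero.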
+ let distance_a = a.z - a.w; + let distance_b = b.z - b.w; + let t = distance_a / (distance_a - distance_b); + return a + (b - a) * t; + } + return a; +} + +struct FragmentInput { + @location(0) color: vec4, +}; + +struct FragmentOutput { + @location(0) color: vec4, +}; + +@fragment +fn fragment(in: FragmentInput) -> FragmentOutput { + return FragmentOutput(in.color); +} +#import bevy_render::view View + +const TEXTURED_QUAD: u32 = 0u; + +@group(0) @binding(0) var view: View; + +struct VertexOutput { + @location(0) uv: vec2, + @location(1) color: vec4, + @location(3) @interpolate(flat) mode: u32, + @builtin(position) position: vec4, +}; + +@vertex +fn vertex( + @location(0) vertex_position: vec3, + @location(1) vertex_uv: vec2, + @location(2) vertex_color: vec4, + @location(3) mode: u32, +) -> VertexOutput { + var out: VertexOutput; + out.uv = vertex_uv; + out.position = view.view_proj * vec4(vertex_position, 1.0); + out.color = vertex_color; + out.mode = mode; + return out; +} + +@group(1) @binding(0) var sprite_texture: texture_2d; +@group(1) @binding(1) var sprite_sampler: sampler; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // textureSample can only be called in unform control flow, not inside an if branch. + var color = texture_sample(sprite_texture, sprite_sampler, in.uv); + if in.mode == TEXTURED_QUAD { + color = in.color * color; + } else { + color = in.color; + } + return color; +} +#define_import_path bevy_render::maths + +fn affine_to_square(affine: mat3x4) -> mat4x4 { + return transpose(mat4x4( + affine[0], + affine[1], + affine[2], + vec4(0.0, 0.0, 0.0, 1.0), + )); +} + +fn mat2x4_f32_to_mat3x3_unpack( + a: mat2x4, + b: f32, +) -> mat3x3 { + return mat3x3( + a[0].xyz, + vec3(a[0].w, a[1].xy), + vec3(a[1].zw, b), + ); +} +#define_import_path bevy_render::instance_index + +#ifdef BASE_INSTANCE_WORKAROUND +// naga and wgpu should polyfill WGSL instance_index functionality where it is +// not available in GLSL. Until that is done, we can work around it in bevy +// using a push constant which is converted to a uniform by naga and wgpu. +// https://github.com/gfx-rs/wgpu/issues/1573 +var base_instance: i32; + +fn get_instance_index(instance_index: u32) -> u32 { + return u32(base_instance) + instance_index; +} +#else +fn get_instance_index(instance_index: u32) -> u32 { + return instance_index; +} +#endif + +``` + +### assets/shaders/myshader + +```rust +//! +//! The default 3d Shader. +//! 
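+//! It simply samples the texture bound at @group(2) @binding(101) with the mesh UVs and
+//! returns the result unchanged.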
+#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::mesh_view_bindings::globals; +#import bevy_pbr::utils PI +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D + +#import bevy_render::view View +// @group(0) @binding(0) var view: View; + +@group(2) @binding(101) var texture: texture_2d; +@group(2) @binding(102) var texture_sampler: sampler; + +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; + + let tex: vec4f = textureSample(texture, texture_sampler, texture_uvs); + return tex; +} + + +``` + +### assets/shaders/fast_dots + +```rust +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::forward_io::VertexOutput + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let uv: vec2 = in.uv; + + var m = 0.; + let t = globals.time; + + var min_dist = 0.; + + for (var i = 0; i < 20; i += 1) { + let n = N22(vec2(f32(i))); + let p = sin(n*t); + let d = length(uv - p); + + m += smoothstep(0.002, 0.001, d); + + if d < min_dist { + min_dist = d; + } + } + + var col = vec3(m); + return vec4(col, 1.0); +} + +// Noise: two in -> two out +fn N22(pp: vec2)->vec2{ + var a = fract(pp.xyx*vec3(123.34, 234.34, 345.65)); + a += dot(a, a + 34.45); + return fract(vec2(a.x*a.y, a.y*a.z)); +} + +``` + +### assets/shaders/howto-texture + +```rust +//! Showing how to use a texture, drag-n-drop for you own texture will be supported soon. + +#import bevy_pbr::mesh_vertex_output VertexOutput + +@group(2) @binding(1) var texture: texture_2d; +@group(2) @binding(2) var texture_sampler: sampler; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; + + let tex: vec4f = texture_sample(texture, texture_sampler, texture_uvs); + + return tex; +} + + +``` + +### assets/shaders/electro_cube + +```rust +#import bevy_pbr::mesh_view_bindings globals view +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI HALF_PI +#import bevy_pbr::mesh_functions + +const GRID_RATIO:f32 = 40.; + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let t = globals.time; + var uv = in.uv - 0.5; + var col = vec3(0.0); + + uv *= GRID_RATIO / 5.; + let grid = grid(uv); + let pal = palette(t / 2. ); + col = mix(col, pal, grid); + + let mo = 5.0%2.0; + + return vec4(col, mo); +} + +// I disklike boring colours, this paticular function comes from Kishimisu (see the wgsl file of same name to explore more of her/his/their ideas.) 
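+// (The function below is the classic cosine-palette formula: a + b * cos(2 * PI * (c * time + d)).)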
+fn palette(time : f32) -> vec3 { + let a = vec3(0.5, 0.5, 0.5); + let b = vec3(0.5, 0.5, 0.5); + let c = vec3(1.0, 1.0, 1.0); + let d = vec3(0.263, 0.416, 0.557); + + return a + b * cos(6.28318 * (c * time + d)); +} + +// inspired by https://www.shadertoy.com/view/Wt33Wf & https://www.shadertoy.com/view/XtBfzz +fn grid(uv: vec2)-> f32 { + let i = step(fract(uv), vec2(1.0/GRID_RATIO)); + return (1.1-i.x) * (0.005+i.y); + +} + + +// License: WTFPL, author: sam hocevar, found: https://stackoverflow.com/a/17897228/418488 +fn hsv2rgb(c: vec3) -> vec3 { + let K: vec4 = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + var p: vec3 = abs(fract(vec3(c.x) + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} + +``` + +### assets/shaders/four_to_the_floor + +```rust +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::forward_io::VertexOutput + +const TAU:f32 = 6.28318530718; + +fn plot(st: vec2f, pct: f32) -> f32 { + let l = pct - 0.02; + let r = pct + 0.02; + + return smoothstep(l, pct, st.y) - smoothstep(pct, r, st.y); +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // var uv = (in.uv * 2.0) - 1.0; + var uv = in.uv; + var col = vec3f(0.); + uv *= fract(uv); + + let to_center = vec2(0.25) - uv; + let angle = atan2(to_center.y, to_center.x); + let radius = length(to_center) * 2.0; + + col = hsv_to_srgb(vec3f((angle / TAU) + globals.time / 3.0, radius, 1.0)); + + let circ = circle(uv, 0.6); + col *= circ; + + let pct = distance(uv, vec2f(0.5)); + + return vec4f(col, 1.0); +} + +fn circle(st: vec2f, rad: f32) -> f32 { + let dist = st - vec2f(0.5); + return 1.0 - smoothstep(rad - (rad * 0.01), rad + (rad * 0.01), dot(dist, dist) * 4.0); +} + +// From the bevy source code +fn hsv_to_srgb(c: vec3) -> vec3 { + let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + let p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} + + +``` + +### assets/shaders/WIP-black-hole + +```rust +#import bevy_pbr::mesh_vertex_output VertexOutput +#import bevy_pbr::utils PI +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_render::view View + +#import shadplay::shader_utils::common rotate2D + +@group(0) @binding(0) var view: View; + +@group(2) @binding(0) var texture: texture_2d; +@group(2) @binding(1) var texture_sampler: sampler; + +const SPEED:f32 = 0.25; +const CAM_DISTANCE: f32 = -2.; +const DISK_ROTATION_SPEED: f32 = 3.0; +const DISK_TEXTURE_LAYERS: f32 = 12.0; +const BLACK_HOLE_SIZE:f32 = 0.3; //QUESTION: % of screen occupied??? +const ANTI_ALIASING: i32 = 2; + + +// Porting https://www.shadertoy.com/view/tsBXW3 by set111:https://www.shadertoy.com/user/set111 +@fragment +fn fragment( + in: VertexOutput +) -> @location(0) vec4 { + let t = globals.time * SPEED; + let resolution = view.viewport.zw; + var frag_out = vec4f(0.0); + var texture_uvs = in.uv; + // texture_uvs *= rotate2D(1.0 + t); // Play with this to rotate the stars in the background. + + let tex: vec4f = texture_sample(texture, texture_sampler, texture_uvs); // Shadertoy's ones don't seem to be affected by uvs modified in the scope of the functions that folk are writing so we take the uvs early do get around that. 
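+    // The next few lines remap the UVs from [0, 1] to [-1, 1], correct for the viewport
+    // aspect ratio, and rotate by -PI/2 (presumably to match the original's orientation).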
+ + var uv = (in.uv * 2.0) - 1.0; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(PI / -2.0); + + // background + let ray = vec3f(0.0); + let bg = background(ray, tex); + + // disk + let zero_position = vec3f(0.0); + let disk = raymarch_disk(ray, zero_position,t); + + + //BH: + for (var i: i32 = 0; i < ANTI_ALIASING; i++) { + // var ray: vec3f = normalize(vec3((frag_rotation - resolution.xy * .5 + vec2(i, j) / ANTI_ALIASING) / resolution.x, 1.0)); + var ray: vec3f = vec3f(0.0); + var pos: vec3f = vec3f(0.0, 0.0, 0.0); + var angle: vec2f = vec2f((t * 0.1), .2); + + angle.y = (2.0 / resolution.y) * 3.14 + 0.1 + 3.14; + let dist: f32 = length(pos); + rotate3D(pos, angle); + + angle -= min((0.3 / dist), 3.14); + angle *= vec2f(1.0, 0.5); + + rotate3D(ray, angle); + + var col: vec4f = vec4(0.0); + var glow: vec4f = vec4(0.0); + var out_col: vec4f = vec4(100.0); + var dotpos: f32 = dot(pos, pos); + var inv_dist: f32 = 1.0/sqrt(dotpos); + var cent_dist: f32 = dotpos * inv_dist; // distance to BH + var step_dist: f32 = 0.92 * abs(pos.y / (ray.y)); //conservative distance to disk (y==0) + var far_limit: f32 = cent_dist * 0.5; //limit step size far from to BH + var close_limit: f32 = cent_dist * 0.1 + 0.05 * cent_dist * cent_dist * (1.0 / BLACK_HOLE_SIZE); //limit step size closse to BH + step_dist = min(step_dist, min(far_limit, close_limit)); + + var inv_dist_sqr: f32 = inv_dist * inv_dist; + var bend_force: f32 = step_dist * inv_dist_sqr * BLACK_HOLE_SIZE * 0.625; //bending force + ray = normalize(ray - (bend_force * inv_dist) * pos); //bend ray towards BH + pos += step_dist * ray; + + // glow += vec4f(1.2, 1.1, 1, 1.0); + // glow *= (0.01 * stepDist * invDistSqr * invDistSqr); + // glow *= clamp((centDist * 2.0 - 1.2), 0.0, 1.0); //adds fairly cheap glow + + for (var disks: i32 = 0; disks < 20; disks++) { + var dist2: f32 = length(pos); + + if dist2 < BLACK_HOLE_SIZE * 0.1 { + //ray sucked in to BH + out_col = vec4(col.rgb * col.a + glow.rgb * (1.0 - col.a), 1.0) ; + break; + + } else if dist2 > BLACK_HOLE_SIZE * 1000.0 { + // ray escaped + var bg = background(ray, tex); + out_col = vec4f(col.rgb * col.a + bg.rgb * (1.0 - col.a) + glow.rgb * (1.0 - col.a), 1.0); + } else if abs(pos.y) <= BLACK_HOLE_SIZE * 0.002 { + //ray hit accretion disk + var disk_col = raymarch_disk(ray, pos,t); //render disk + pos.y = 0.0; + pos += abs(BLACK_HOLE_SIZE * 0.001 / ray.y) * ray; + col = vec4(disk_col.rgb * (1.0 - col.a) + col.rgb, col.a + disk_col.a * (1.0 - col.a)); + } + } + + //if the ray never escaped or got sucked in + if out_col.r == 100.0 { + out_col = vec4(col.rgb + glow.rgb * (col.a + glow.a), 1.); + + col = out_col; + let col_rgb = pow(col.rgb, vec3(0.6)); + + frag_out = vec4f(col_rgb, col.a) ;// / (f32(ANTI_ALIASING) * f32(ANTI_ALIASING)); + } + } + + // return bg; + // return disk; + return frag_out; +} + +fn raymarch_disk(ray: vec3f, zero_position :vec3f, t: f32) -> vec4f { + // Probably the disk and its sizing? + var position = zero_position; + let len_pos: f32 = length(position.xz); + let dist: f32 = min(1., len_pos * (1. / BLACK_HOLE_SIZE) * 0.5) * BLACK_HOLE_SIZE * 0.4 * (1. / BLACK_HOLE_SIZE) / (abs(ray.y)); //TODO break this up. + + position += dist * DISK_TEXTURE_LAYERS * ray * 0.5; // why 0.5 + + //??? + var delta_pos = vec2f(0.0); + delta_pos.x = -zero_position.z * 0.01 + zero_position.x; + delta_pos.y = -zero_position.x * 0.01 + zero_position.z; // What happens wit h bigger values than 0.01 does this turn us around or something? 
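+    // (Best guess at the two lines above: they nudge the point by a tiny rotation in the disk
+    // plane, so after the normalize() below delta_pos is roughly the local spin direction,
+    // which then drives the red/blue shift via the dot() with ray.xz.)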
+ delta_pos = normalize(delta_pos - zero_position.xz); // why xz? + + + // Does what? + var parallel: f32 = dot(ray.xz, delta_pos); // what does the dot do again? + parallel /= sqrt(len_pos); + parallel *= 0.5; + var red_shift = parallel + 0.3; + red_shift *= red_shift; + red_shift = clamp(red_shift, 0.0, 1.0); + + var dis_mix = clamp((len_pos - BLACK_HOLE_SIZE * 2.0) * (1.0 / BLACK_HOLE_SIZE) * 0.25, 0.0, 1.0); // TODO: break this up. + + var inside_col: vec3f = mix(vec3f(1.0, 0.8, 0.0), vec3(1.6, 2.4, 4.0), red_shift); + inside_col *= mix(vec3(0.4, 0.2, 0.1), vec3(1.6, 2.4, 4.0), red_shift); + inside_col *= 1.25; + red_shift += 0.12; + red_shift *= red_shift; + + var out = vec4(0.0); // Initialise blanks to draw into. + var o_rgb = vec3f(0.0); + + for (var i: f32 = 0.0; i < DISK_TEXTURE_LAYERS; i += 1.0) { + position -= dist * ray; + + var intensity: f32 = clamp(1.0 - abs((i - 0.8) * (1. / DISK_TEXTURE_LAYERS) * 2.0), 0.0, 1.0); // TODO: wtf these numbers do? + var length_pos_local = length(position.xz); + var dist_mult = 1.0; + + dist_mult *= clamp((length_pos_local - DISK_TEXTURE_LAYERS * 0.75) * (1.0 / DISK_TEXTURE_LAYERS) * 1.5, 0.0, 1.); // TODO: wtf these numbers do? + dist_mult *= clamp((DISK_TEXTURE_LAYERS * 10. - length_pos_local) * (1.0 / DISK_TEXTURE_LAYERS) * 0.20, 0.0, 1.); // TODO: wtf these numbers do? + dist_mult *= dist_mult; + + let u = length_pos_local + t * DISK_TEXTURE_LAYERS * 0.3 + intensity * DISK_TEXTURE_LAYERS * 0.2; + + // -sin + cos, and sin cos is usually a rotation... + let rot = t * (DISK_ROTATION_SPEED % 8192.0); //QUESTION: suspicious power of 2... + let x: f32 = -position.z * sin(rot) + position.x * cos(rot); + let y: f32 = position.x * sin(rot) + position.z * cos(rot); + let xy = vec2f(x, y); + + + let x_ab = abs(xy.x / (xy.y)); // That's slick. + let angle = 0.02 * atan(x); + + let f = 70.0; // TODO: why? const? + let lhs: vec2f = vec2f(angle, (u * (1.0 / BLACK_HOLE_SIZE) * 0.05)); + var noise: f32 = value_noise(lhs, f); + noise = noise * 0.66 + 0.33 * value_noise(lhs, f * 2.); //QUESTION: this lhs was hard-coded in -- perhaps for good reason? + + + let extra_width: f32 = noise * 1.0 * (1.0 - clamp(i * (1.0 / DISK_TEXTURE_LAYERS) * 2.0 - 1.0, 0.0, 1.0)); // TODO: (1.0/BLABLACK_HOLE_SIZE is used so many times we should just do it once...) + + // let lhs_clamp: f32 = noise * (intensity + extra_width) * ((1.0 / BLACK_HOLE_SIZE) * 10.0 + 0.01) * dist * dist_mult; + let alpha: f32 = clamp(noise * (intensity + extra_width) * ((1.0 / BLACK_HOLE_SIZE) * 10.0 + 0.01) * dist * dist_mult, 0.0, 1.0); + var col = 2.0 * mix(vec3(0.3, 0.2, 0.15) * inside_col, inside_col, min(1., intensity * 2.)); + out = clamp(vec4(col * alpha + out.rgb * (1. - alpha), out.a * (1. - alpha) + alpha), vec4(0.), vec4(1.)); + + length_pos_local *= (1.0 / BLACK_HOLE_SIZE); + + o_rgb += red_shift * (intensity * 1.0 + 0.5) * (1.0 / DISK_TEXTURE_LAYERS) * 100.0 * dist_mult / (length_pos_local * length_pos_local); + } + + o_rgb.r = clamp(o_rgb.r - 0.005, 0.0, 1.0); + o_rgb.g = clamp(o_rgb.g - 0.005, 0.0, 1.0); + o_rgb.b = clamp(o_rgb.b - 0.005, 0.0, 1.0); + + // return vec4f(o_rgb, out.a); + + return vec4f(1.0, 1.0, 1.0, 0.0); // no disk +} + + +// Here's a breakdown of what the function does: +// 1. `vector.yz = cos(angle.y)*vector.yz + sin(angle.y)*vec2(-1,1)*vector.zy;`: This line rotates the vector around the y-axis. 
The `cos(angle.y)*vector.yz` term preserves the part of the vector that's aligned with the y-axis, while the `sin(angle.y)*vec2(-1,1)*vector.zy` term adds in the part of the vector that's perpendicular to the y-axis. +// 2. `vector.xz = cos(angle.x)*vector.xz + sin(angle.x)*vec2(-1,1)*vector.zx;`: This line rotates the vector around the x-axis in a similar way. +// The function modifies the input vector in place, meaning that after calling this function, the original vector will have been rotated by the specified angles. +// Please note that this function assumes that your vector components are arranged in a certain way (x, y, z). If your components are arranged differently, you may need to adjust which components are being rotated. +fn rotate3D(vector: vec3, angle:vec2) -> vec3 { + var temp_yz = cos(angle.y) * vector.yz + sin(angle.y) * vec2(-1.0, 1.0) * vector.zy; + var temp_xz = cos(angle.x) * vector.xz + sin(angle.x) * vec2(-1.0, 1.0) * vector.zx; + return vec3(temp_xz.x, temp_yz.x, temp_yz.y); +} + +fn background(ray: vec3f, texture: vec4f) -> vec4f { + var uv = ray.xy; + if abs(ray.x) > 0.5 { + uv.x = ray.z; + } else if abs(ray.y) > 0.5 { + uv.y = ray.z; + } + + + // + var brightness = value_noise(uv * 3.0, 100.); // (dodgy stars), according to the comments in shadertoy + var colour = value_noise(uv * 2.0, 20.); // why 20.? + brightness = pow(brightness, 256.0); // why 256? const? + brightness = brightness * 100.0; // *= 100.0 ?? + brightness = clamp(brightness, 0.0, 1.0); // does what? + + + var stars: vec3f = brightness * mix(vec3f(1.0, 0.6, 0.2), vec3f(0.2, 0.6, 1.0), colour); // what happens when you mess with these vec3's values? + + // var nebulae = textuxe(iChannel10, (uv*1.5)); // We have no textures so... have a think on that one.. (the shadertoy kid is using a galazy img.) + var nebulae = texture.xyz; + let nebulae_alpha = texture.a; // Keep this as we cannot swizzle with it + nebulae += (nebulae.xxx + nebulae.yyy + nebulae.zzz); + nebulae *= 0.25; + + // nebulae *= nebulae; //TODO loop, more pure math by multiplying a const? + nebulae *= nebulae; + nebulae *= nebulae; + nebulae *= nebulae; // Yep.. they do it 4 times, which basically makes it darker + + + nebulae += stars; + + + return vec4f(nebulae, 1.0); +} + +// Creates a pretty even noise (I have no idea how...) 
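+// (It is standard 2D value noise: hash the four corners of the grid cell containing p * f,
+// smooth the fractional coordinates with the Hermite curve (3 - 2t) * t * t, then
+// bilinearly mix the four corner values.)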
+fn value_noise(p: vec2f, f: f32) -> f32 { + let b1: f32 = hash21(floor(p * f + vec2(0.0, 0.))); + let br: f32 = hash21(floor(p * f + vec2(1.0, 0.))); + let t1: f32 = hash21(floor(p * f + vec2(0.0, 1.))); + let tr: f32 = hash21(floor(p * f + vec2(1.0, 1.))); + + var fr = fract(p * f); + fr = (3.0 - 2.0 * fr) * fr * fr; + let b = mix(b1, br, fr.x); + let t = mix(t1, tr, fr.x); + + return mix(b, t, fr.y); +} + +// Hash 2 into 1 +fn hash21(x: vec2f) -> f32 { + return (hash(x.x + hash(x.y))); +} +// Hash 1 into 1 +fn hash(x: f32) -> f32 { + return fract(sin(x) * 152754.742); +} + +``` + +### assets/shaders/WIP-waterPool + +```rust +/// ***************************** /// +/// This is a prot of WaterPool by rubaotree, on shadertoy: https://www.shadertoy.com/view/ctcBRn +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 0.25; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + + let uvt = vec3(uv.x * 1.6, uv.y * 2.0 + t * 0.3, t * 0.3); + let height:f32 = get_height(uvt); + let shake:vec3f = get_gradient(uvt * 0.4) * 0.005; + let dlight:vec3f = normalize(vec3(0.0, -0.8, 0.9)); + let normal:vec3f = normalize(vec3(get_gradient(uvt * 2.0))); + let lightness:f32 = dot(dlight, normal); + + var col = poolColor((uv + shake.xy) * 0.5 + 0.25); + + let vorValue = voronoi(vec3(uv.x * 0.8, uv.y, t * 0.5), 4.0); + let cutValue = voronoi_cut(vorValue); + col += cutValue * 0.3; + + col += vec3(1.0) * step(1.2, lightness + height) * 0.9; + col += vec3(clamp(height - 0.3, -0.3, 1.0) * 0.5); + + return vec4(col, 1.0); +} + +fn ring_curve(t: f32) -> f32 { + return convex_and_clip((abs(1.0 / sin(t)) - 1.0) * 0.05, 1.0); +} + +fn light_mix(col: vec3, lightness: f32) -> vec3 { + return col * (lightness * 1.2 + 0.3); +} + +fn coord_to_uv(coord: vec2, iResolution: vec2) -> vec2 { + return coord / max(iResolution.x, iResolution.y); +} + +fn voronoi(p: vec3, density: f32) -> f32 { + var id = floor(p * density); + var min_dist = 1.0; + + for (var dy: i32 = -1; dy <= 1; dy++) { + for (var dx: i32 = -1; dx <= 1; dx++) { + var neighbor = id + vec3(f32(dx), f32(dy), 0.0); + var point = neighbor + random2to3(neighbor); // Assuming a random2to3 function + var dist = length(point - p * density); + min_dist = min(min_dist, dist); + } + } + return min_dist; +} + +fn voronoi_cut(t: f32) -> f32 { + return t * 1.4; +} + +fn convex_and_clip(t: f32, ind: f32) -> f32 { + if (t <= 0.0) { return 0.0; } + if (t >= 1.0) { return 1.0; } + return 1.0 - abs(pow(t - 1.0, ind)); +} + +// Dummy random function (replace with a better one) +fn random2to3(p: vec3) -> vec3 { + return fract(sin(vec3(dot(p, vec3(127.1, 311.7, 74.7)), + dot(p, vec3(269.5, 183.3, 246.1)), + dot(p, vec3(113.5, 271.9, 124.6)))) * 43758.5453); +} + +fn poolColor(uv: vec2) -> vec3 { + return vec3(uv.x, uv.y, 1.0 - uv.x * uv.y); +} + +fn get_gradient(uvt: vec3) -> vec3 { + return normalize(vec3(sin(uvt.x), cos(uvt.y), sin(uvt.z))); +} + +fn get_height(uvt: vec3) -> f32 { + return sin(uvt.x * 10.0) * cos(uvt.y * 10.0) * 0.5; +} + + +fn hash11(_p: f32) -> f32 { + var p = fract(_p * 0.1031); + p *= p + 33.33; + p *= p + p; + return fract(p); +} +fn hash21(_p: vec2) -> f32 { + 
var p3 = fract(vec3(_p.x, _p.y, _p.x) * 0.1031); + p3 += dot(p3, p3.yzx + 33.33); + return fract((p3.x + p3.y) * p3.z); +} +fn hash31(_p3: vec3) -> f32 { + var p = fract(_p3 * 0.1031); + p += dot(p, p.zyx + 31.32); + return fract((p.x + p.y) * p.z); +} + +fn hash12(_p: f32) -> vec2 { + var p3 = fract(vec3(_p) * vec3(0.1031, 0.1030, 0.0973)); + p3 += dot(p3, p3.yzx + 33.33); + return fract((p3.xx + p3.yz) * p3.zy); +} + +fn hash22(_p: vec2) -> vec2 { + var p3 = fract(vec3(_p.x, _p.y, _p.x) * vec3(0.1031, 0.1030, 0.0973)); + p3 += dot(p3, p3.yzx + 33.33); + return fract((p3.xx + p3.yz) * p3.zy); +} + +fn hash32(_p3: vec3) -> vec2 { + var p = fract(_p3 * vec3(0.1031, 0.1030, 0.0973)); + p += dot(p, p.yzx + 33.33); + return fract((p.xx + p.yz) * p.zy); +} + +fn luminance(_col: vec3) -> f32 { + return dot(vec3(0.2125, 0.7154, 0.0721), _col); +} + +fn rgb2hsv(_col: vec3) -> vec3 { + let min_val = min(min(_col.r, _col.g), _col.b); + let max_val = max(max(_col.r, _col.g), _col.b); + var h: f32 = 0.0; + var s: f32 = 0.0; + let v: f32 = max_val; + + let delta = max_val - min_val; + if (max_val != 0.0) { + s = delta / max_val; + } else { + // r = g = b = 0 + s = 0.0; + h = -1.0; + return vec3(h, s, v); + } + + if (_col.r == max_val) { + h = (_col.g - _col.b) / delta; + } else if (_col.g == max_val) { + h = 2.0 + (_col.b - _col.r) / delta; + } else { + h = 4.0 + (_col.r - _col.g) / delta; + } + + h *= 60.0; + if (h < 0.0) { + h += 360.0; + } + + return vec3(h / 360.0, s, v); +} + +fn hsv2rgb(_c: vec3) -> vec3 { + let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + var p = abs(fract(vec3(_c.x) + K.xyz) * 6.0 - K.www); + p = clamp(p - K.xxx, vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)); + return _c.z * mix(K.xxx, p, _c.y); +} + + +fn smooth_curve(_x: f32) -> f32 { + // return 6.0 * _x * _x * _x * _x * _x - 15.0 * _x * _x * _x * _x + 10.0 * _x * _x * _x; + return 6.0 * pow(_x, 5.0) - 15.0 * pow(_x, 4.0) + 10.0 * pow(_x, 3.0); +} + +fn Gauss(_dist: f32) -> f32 { + return exp(-10.0 * _dist * _dist); +} + +fn Gauss_sq(_dist_sq: f32) -> f32 { + return exp(-10.0 * _dist_sq); +} + +fn palette(_t: f32, _a: vec3, _b: vec3, _c: vec3, _d: vec3) -> vec3 { + return _a + _b * cos(6.28318 * (_c * _t + _d)); +} + + + + + + + + + + + + + + + + + +``` + +### assets/shaders/indexing_into_vec_with_loop + +```rust +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals // for 2D +#import bevy_render::view View +#import bevy_pbr::utils PI + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 0.22; + +// Working out how to use the vec2f[idx] indexing with loops. 
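+// (WGSL allows indexing a local `var` vector with a runtime index, so the loop below can
+// write v[0], v[1], v[2] in turn and the r/g/b swizzles read the same components back.)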
+@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = in.uv.xy; + + var col = vec4f(0.0); + col.a = 1.0; + + // Write over these v[idx] by idx we tick in a loop: + var v = vec3f(1.0); + for (var idx = 0; idx < 3; idx += 1) { + v[idx] = fract(globals.time * SPEED); + } + + col.r = v.r; + col.b = v.b; + col.g = v.g; + + return col; +} + +``` + +### assets/shaders/rain_generator + +```rust +/// ***************************** /// +/// This is a port of RainGenerator: +/// https://www.shadertoy.com/view/lt33zM, by TheBinaryCodeX +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +// Constants +const LAYERS: i32 = 3; // Number of layers of drops +const SCALE: f32 = 256.0; // Overall scale of the drops +const LENGTH: f32 = 16.298; // Length of the drops +const LENGTH_SCALE: f32 = 0.6337; // How much the drop length changes every layer +const FADE: f32 = 0.6; // How much the drops fade every layer +const SPEED: f32 = 18.337; // How fast the drops fall +const DROP_COLOR: vec3 = vec3(0.54, 0.8, 0.94); +const BG_COLOR: vec3 = vec3(0.003, 0.02, 0.07); +const ANGLE:f32 = -8.9337; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4f { + var uv: vec2f = (in.uv * 2.0) - 1.0; + uv *= rotate2D(ANGLE); // Change the angle the rain is falling from... -8.4 to 8.4~ + let screen_resolution: vec2f = view.viewport.zw; + let aspect: f32 = screen_resolution.x / screen_resolution.y; + uv.x *= aspect; + + var finalColor: vec4f = vec4f(0.0, 0.0, 0.0, 0.0); + + var dropLength: f32 = LENGTH; + var alpha: f32 = 1.0; + + for (var i: i32 = 0; i < LAYERS; i = i + 1) { + let f: f32 = rainFactor(uv, SCALE, dropLength, vec2f(SCALE * f32(i), globals.time * SPEED), 0.95); + + let color: vec4f = vec4f(DROP_COLOR, f * alpha); + + finalColor = over(finalColor, color); + + dropLength *= LENGTH_SCALE; + alpha *= FADE; + } + + finalColor = over(finalColor, vec4f(BG_COLOR, 1.0)); + + return finalColor; +} + +// Function to generate random numbers based on coordinates +fn rand(co: vec2f) -> f32 { + let a: f32 = 12.9898; + let b: f32 = 78.233; + let c: f32 = 43758.5453; + let dt: f32 = dot(co, vec2f(a, b)); + let sn: f32 = dt % 3.14; + + return fract(sin(sn) * c); +} + +// Function to calculate the rain factor based on UV coordinates +fn rainFactor(uv: vec2f, scale: f32, dripLength: f32, offset: vec2f, cutoff: f32) -> f32 { + let pos: vec2f = uv * vec2f(scale, scale / dripLength) + offset; + let dripOffset: vec2f = vec2f(0.0, floor(rand(floor(pos * vec2f(1.0, 0.0))) * (dripLength - 0.0001)) / dripLength); + let f: f32 = rand(floor(pos + dripOffset)); + + return step(cutoff, f); +} + +// Function to overlay two colors +fn over(a: vec4f, b: vec4f) -> vec4f { + return vec4f(mix(b.rgb, a.rgb, a.a), max(a.a, b.a)); +} + +``` + +### assets/shaders/WIP-total_noob + +```rust +/// This is a port of 'total noob' by dynamite +/// Source material: https://www.shadertoy.com/view/XdlSDs +/// Authour: https://www.shadertoy.com/user/dynamite +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D, TAU, PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; +const CIRCLE_SIZE:f32 = 0.4; +const 
BEAM_ROT_SPEED:f32 = 0.2; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = in.uv; + uv *= rotate2D(PI); + uv = (in.uv * 2.0) - 1.0; + let resolution:vec2f = view.viewport.zw; + uv.x *= resolution.x / resolution.y; + + let t = globals.time * SPEED; + var angle = atan2(uv.y, uv.x); + + return circle_charge_beam(uv,resolution, angle, t); +} + +fn circle_charge_beam(uv: vec2f, resolution: vec2f, angle: f32, t: f32) -> vec4 { + let uv_l = uv; + var uv = uv; // so we can mutate the uvs locally. + + var rad = length(uv) * CIRCLE_SIZE; // circle radius setter + uv = vec2(angle / TAU * 2.0 , rad); + + // 'get' a colour: + var x_colour: f32 = (uv.x - (t * BEAM_ROT_SPEED)) * 3.0; // QUESTION: what happens when this is not a multiple of PI? + x_colour = x_colour % 3.0; + var hor_colour = vec3(0.25); + + if x_colour < 1.0 { + hor_colour.r = 1.0 - x_colour; + hor_colour.g += x_colour; + } else if x_colour < 2.0 { + x_colour -= 1.0; + hor_colour.g = 1.0 - x_colour; + hor_colour.b += x_colour; + } else { + x_colour -= 2.0; + hor_colour.b = 1.0 - x_colour; + hor_colour.r += x_colour; + } + + + uv = (uv * 2.0) - 1.0; // resetting the earlier offsets we made to the uvs. + + // BUG: somewhere in this lies the solution to the lhs sharp, black line. + if uv.x > 9.0 { + hor_colour += 990.0; + } + + var coefficient_1 = 0.7; + var coefficient_2 = 0.5; + var uv_x = uv.x; + var uv_x_constant = uv_x * 3.20 * PI * 0.85; + // var uv_x_constant = PI * 0.85; + + var floor_value = floor(PI + cos(t)); + var clamped_value = clamp(floor_value, 0.0, 10.0); + var abs_uv_y_denominator = 90.0 * uv.y; + var abs_uv_y = abs(1.0 / abs_uv_y_denominator); + + // Recombine to calculate beam_width: + var beam_width:f32 = (coefficient_1 + coefficient_2 * cos(uv_x_constant * clamped_value)) * abs_uv_y; + + + var hor_beam = vec3f(beam_width / PI); + + return vec4f((hor_beam * hor_colour), 1.0); +} + + + +``` + +### assets/shaders/grid_with_colours + +```rust +#import bevy_pbr::mesh_view_bindings globals view +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI HALF_PI +#import bevy_pbr::mesh_functions + +const GRID_RATIO:f32 = 40.; + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let t = globals.time; + + var uv = in.uv - 0.5; + uv *= GRID_RATIO / 5.; + var col = vec3(0.0); + + let grid = grid(uv); + + let pal = palette(t / 20.); + col = mix(col, pal, grid); + + return vec4(col, 1.0); +} + +// I disklike boring colours, this paticular function comes from Kishimisu (see the wgsl file of same name to explore more of her/his/their ideas.) 
+fn palette(t: f32) -> vec3 { + let a = vec3(0.5, 0.5, 0.5); + let b = vec3(0.5, 0.5, 0.5); + let c = vec3(1.0, 1.0, 1.0); + let d = vec3(0.263, 0.416, 0.557); + + return a + b * cos(6.28318 * (c * t + d)); +} + +// inspired by https://www.shadertoy.com/view/Wt33Wf & https://www.shadertoy.com/view/XtBfzz +fn grid(uv: vec2)-> f32 { + let i = step(fract(uv), vec2(1.0/GRID_RATIO)); + return (1.0-i.x) * (1.0-i.y); + +} + + +// License: WTFPL, author: sam hocevar, found: https://stackoverflow.com/a/17897228/418488 +fn hsv2rgb(c: vec3) -> vec3 { + let K: vec4 = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + var p: vec3 = abs(fract(vec3(c.x) + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} + +``` + +### assets/shaders/aura + +```rust +#import bevy_pbr::forward_io::{VertexOutput, FragmentOutput}; +#import bevy_pbr::mesh_view_bindings::globals +#import bevy_render::view::View + +/// Keep up-to-date with the rust definition! +struct AuraMaterial { + unused: f32, +} + +@group(0) @binding(0) var view: View; +@group(2) @binding(100) var aura_mat: AuraMaterial; + +// Colour picker tells us the values of the original.. +// Darkish +// #CEAA4F +const GOLD = vec3f(0.807843, 0.666667, 0.309804); +const SPIKE_NUM: f32 = 9.0; +const SPIKE_LEN: f32 = 1.68; +const SPIKE_SPEED:f32 = 32.0; +const PI: f32 = 3.141592653589; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = in.uv; + uv = uv * 2.0 - 1.0; //normalise to 0 .. 1 + + let time = globals.time; + + let feet_mask = sdCircle(uv, 0.25); // Get a mask for the area around our feet. + + // Move into polar coordinates. + var pc = vec2f(atan2(uv.x, uv.y), length(uv)); + let x = (pc.x / PI) * SPIKE_NUM; // Divide the x coords by PI so they line up perfectly. + + // Make the spikes. + let f_x = fract(x); + let f2_x = fract(1.0 - x); + var m = min(f_x, f2_x); + m = m * SPIKE_LEN - pc.y; + + // Draw the spikes: + var c = smoothstep(0.03, 0.9, m); + var col = vec3f(c); + + let rate:f32 = time * SPIKE_SPEED; + + let idx: f32 = rate % (SPIKE_NUM * 2.0) - (SPIKE_NUM - 1.0) ; + var x_clamp = -floor(x); + let is_focused_spike = step(0.5, abs(idx - x_clamp)); + col *= mix(GOLD / 0.15, GOLD * 0.54, is_focused_spike); + + // Mask out the area around the character's feet.. + var out = vec4f(col, m); + out *= smoothstep(0.0, 0.09, feet_mask); + + // TODO: the index in the original's colour splashes either side of the other two indicies.. + // we should brighten them too, have this spill available as const. 
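+    // Untested sketch of that TODO (would sit next to the mix() above): pick the two adjacent
+    // spikes with `let spill = step(0.5, abs(idx - x_clamp)) - step(1.5, abs(idx - x_clamp));`
+    // and blend in a brighter gold with `col = mix(col, GOLD / 0.3, spill * 0.5);`
+    // (ignores wrap-around at the first/last spike).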
+ return out; +} + +fn sdCircle(p: vec2f, r: f32) -> f32 { + return length(p) - r; +} + + + + +``` + +### assets/shaders/myshader_2d + +```rust +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput + +// #import bevy_sprite::mesh2d_vertex_output::VertexOutput + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; + +@fragment +fn fragment(mesh: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = mesh.uv; + uv = (uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + // uv.x *= resolution.x / resolution.y; + // uv *= rotate2D(NEG_HALF_PI); + + // return vec4f(shader_toy_default(t, uv), 1.0); + return vec4f(1.0); +} + + +``` + +### assets/shaders/howto-mouse + +```rust +/// How to use the mouse, in shadplay. +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +@group(2) @binding(0) var mouse: YourShader2D; +struct YourShader2D{ + mouse_pos : vec2f, +} + +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let mouse = mouse.mouse_pos; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + var col =vec4f(shader_toy_default(t, uv), 1.0); + col.a *= abs(mouse.y); + col.a *= abs(mouse.x); + col.a *= 0.225; // prevent us from ever going truly transparent. + + return col; +} + + +``` + +### assets/shaders/common/notes + +```rust + + +fn trace(origin: vec3f, r: vec3f) -> f32 { + var t = 0.0; + for (var i = 0; i < 64; i++) { + let p = origin + r * t; + let d = mmap(p); + t += d * 0.22; + } + return t ; +} + +fn mmap(p: vec3f) -> f32 { + var p = p; + var q = p; + var qa = p; + + q = pmod3(q, vec3f(0.8, 1.0, 0.23)); + qa = pmod3(qa, vec3f(0.8, 1.0, 0.18)); + p.x = pmod1(p.x, 1.0); + + let s1 = sd_sphere(p, 0.75); + let s2 = sd_sphere(q, 0.5); + let s3 = sd_sphere(qa, 0.555); + + return min(min(s1, s2), s3); +} + +fn pmod1(in: f32, size: f32) -> f32 { + let halfsize = size * 0.5; + return (in + halfsize % size) - halfsize; +} + +fn pmod3(in: vec3f, size: vec3f) -> vec3f { + let out = (in % size * 0.5) - (size * 0.5); + + return out; +} + +fn sd_sphere(p: vec3f, radius: f32) -> f32 { + return (length(p) - radius); +} + + +// fn fade(col: vec4f, uv: vec2f) { +// let fade = max(abs(uv.x), abs(uv.y)) - 1.0 ; // This is really cool. 
+// let col = col * (fade / (0.005 + fade)); +// return col; +// } + +fn hsv2rgb(c: vec3f) -> vec3f { + var rgb: vec3f = clamp( + abs((c.x * 6.0 + vec3f(0.0, 4.0, 2.0)) % 6.0 - 3.0) - 1.0, + vec3f(0.0), + vec3f(1.0) + ); + return c.z * mix(vec3f(1.0), rgb, c.y); +} + +fn gradient(t: f32) -> vec3f { + let h: f32 = 0.6666 * (1.0 - t * t); + let s: f32 = 0.75; + let v: f32 = 1.0 - 0.9 * (1.0 - t) * (1.0 - t); + return hsv2rgb(vec3f(h, s, v)); +} + +/// MISC: +// License: MIT, author: Inigo Quilez, found: https://iquilezles.org/www/articles/distfunctions2d/distfunctions2d.htm +fn sd_hexagon(p: vec2f, r: f32) -> f32 { + let k = vec3f(-0.866025404, 0.5, 0.577350269); + var q: vec2f = abs(p); + q = q - 2. * min(dot(k.xy, q), 0.) * k.xy; + q = q - vec2f(clamp(q.x, -k.z * r, k.z * r), r); + return length(q) * sign(q.y); +} + +// Translate the GLSL hextile function to WGSL +fn hextile(p: vec2f) -> vec2f { + // See Art of Code: Hexagonal Tiling Explained! + // https://www.youtube.com/watch?v=VmrIDyYiJBA + var p = p; + + // Define constants + let sz: vec2f = vec2f(1.0, sqrt(3.0)); + let hsz: vec2f = 0.5 * sz; + + // Calculate p1 and p2 + let p1: vec2f = (p % sz) - hsz; + let p2: vec2f = ((p - hsz) % sz) - hsz; + + // Choose p3 based on dot product + var p3: vec2f = vec2(0.); + if dot(p1, p1) < dot(p2, p2) { + p3 = p1; + } else { + p3 = p2; + } + + // Calculate n + var n: vec2f = ((p3 - p + hsz) / sz); + p = p3; + + // Adjust n and round for well-behaved hextile 0,0 + n -= vec2(0.5); + return round(n * 2.0) * 0.5; +} + +// NOTE: swapped the hash +// fn hash(pp: vec2) -> f32 { //NOTE: from some other tutorial/bevy code? +// var p3 = fract(vec3(pp.xyx) * 0.1031); +// p3 += dot(p3, p3.yzx + 33.33); +// return fract((p3.x + p3.y) * p3.z); +// } +fn hash(co: vec2f) -> f32 { + // Add a constant + let co: vec2f = co + 1.234; + + // Calculate and return the fractal part of a sine function + return fract(sin(dot(co.xy, vec2f(12.9898, 58.233))) * 13758.5453); +} + + +fn off6(n: f32) -> vec2 { + return vec2(1.0, 0.0) * rotate2D(n * TAU / 6.0); +} + +fn sd_bezier(p: vec2f, A: vec2f, B: vec2f, C: vec2f) -> vec2f { + let a = B - A; + let b = A - 2. * B + C; + let c = a * 2.; + let d = A - p; + let kk = 1. / dot(b, b); + let kx = kk * dot(a, b); + let ky = kk * (2. * dot(a, a) + dot(d, b)) / 3.; + let kz = kk * dot(d, a); + + let p1 = ky - kx * kx; + let p3 = p1 * p1 * p1; + let q = kx * (2.0 * kx * kx - 3.0 * ky) + kz; + var h: f32 = q * q + 4. * p3; + + var res: vec2f; + if h >= 0. { + h = sqrt(h); + let x = (vec2f(h, -h) - q) / 2.; + let uv = sign(x) * pow(abs(x), vec2f(1. 
/ 3.)); + let t = clamp(uv.x + uv.y - kx, 0., 1.); + let f = d + (c + b * t) * t; + res = vec2f(dot(f, f), t); + } else { + let z = sqrt(-p1); + let v = acos(q / (p1 * z * 2.)) / 3.; + let m = cos(v); + let n = sin(v) * 1.732050808; + let t = clamp(vec2f(m + m, -n - m) * z - kx, vec2f(0.0), vec2f(1.0)); + let f = d + (c + b * t.x) * t.x; + var dis: f32 = dot(f, f); + res = vec2f(dis, t.x); + + let g = d + (c + b * t.y) * t.y; + dis = dot(g, g); + res = select(res, vec2f(dis, t.y), dis < res.x); + } + res.x = sqrt(res.x); + return res; +} + + +fn coff(h: f32, time: f32) -> vec2 { + let h0: f32 = h; + let h1: f32 = fract(h0 * 9677.0); + let h2: f32 = fract(h0 * 8677.0); + let t: f32 = mix(0.5, 1.0, h2 * h2) * time + 1234.5 * h0; + return mix(vec2(0.1, 0.1), vec2(0.2, 0.2), h1 * h1) * sin(t * vec2(1.0, sqrt(0.5))); +} + +fn aces_approx(v: vec3) -> vec3 { + var v = max(v, vec3(0.0, 0.0, 0.0)); + v *= 0.6; + let a: f32 = 2.51; + let b: f32 = 0.03; + let c: f32 = 2.43; + let d: f32 = 0.59; + let e: f32 = 0.14; + return clamp((v * (a * v + b)) / (v * (c * v + d) + e), vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)); +} + +fn to_smith(p: vec2) -> vec2 { + let d: f32 = (1.0 - p.x) * (1.0 - p.x) + p.y * p.y; + let x: f32 = (1.0 + p.x) * (1.0 - p.x) - p.y * p.y; + let y: f32 = 2.0 * p.y; + return vec2(x, y) / d; +} + +fn from_smith(p: vec2) -> vec2 { + let d: f32 = (p.x + 1.0) * (p.x + 1.0) + p.y * p.y; + let x: f32 = (p.x + 1.0) * (p.x - 1.0) + p.y * p.y; + let y: f32 = 2.0 * p.y; + return vec2(x, y) / d; +} + + +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); +} + + +fn transform(p: vec2, TIME: f32) -> vec2 { + var p = p * 2.0; + let sp0: vec2 = to_smith(p - vec2(0.0, 0.0)); + let sp1: vec2 = to_smith(p + vec2(1.0) * rotate2D(0.12 * TIME)); + let sp2: vec2 = to_smith(p - vec2(1.0) * rotate2D(0.23 * TIME)); + p = from_smith(sp0 + sp1 - sp2); + return p; +} + +``` + +### assets/shaders/shadertoy-ports/light-spirals + +```rust +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 0.30; +const PI: f32 = 3.14159265359; +const TWO_PI = 6.2848; +const NUM_ITER = 8.0; + +// This is a port of `light spirals` by `felipetovarhenao` on shadertoy. https://www.shadertoy.com/view/DlccR7 +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + + //NOTE: we're not rotating, which allows us to remove the original's adjust_viewport + return spirals(uv, resolution, t); +} + +// draw the 'spirals' +fn spirals(_uv: vec2f, resolution: vec2f, t: f32) -> vec4f { + var uv = _uv; + // Something blank to paint onto! + var col: vec3f = vec3(0.0, 0.0, 0.0); + + // Setup a colour palette: + let c1: vec3f = vec3(0.5, 0.5, 0.5); + let c2: vec3f = vec3(0.5, 0.5, 0.5); + let c3: vec3f = vec3(0.1, 0.1, 0.1); + let c4: vec3f = vec3(0.6, 0.7, 0.8); + + let mag: f32 = length(uv); + + // `d` is distance, we'll get multiple 'distances' that we're interested in using them as we override them in the loop. 
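+    // Each iteration draws one sine strand: d starts as the horizontal distance to the strand
+    // (plus a "gap" term that dims sections of it), then lum / d turns that distance into a
+    // glow which is accumulated into col with a per-strand gradient colour.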
+ var d: f32 = 0.0; + for (var i: f32 = 0.0; i < NUM_ITER; i += 1.0) { + let h: f32 = i + 1.0; + let ph: f32 = t + noise_overload_3(uv); + let amp: f32 = pow(1.333, i); + let fq: f32 = uv.y * TWO_PI * h; + let sig: f32 = sin(fq + ph * h) * (0.333 / amp); + d = uv.x + sig; + d = abs(d); + let gap: f32 = cos(fq * 0.4 + t); + var a: f32 = abs(gap); + a = pow(a, 5.0); + d += a; + let lum: f32 = scale(sin(ph * h), -1.0, 1.0, 0.003, 0.007) * (NUM_ITER * 0.25 / h); + d = lum / abs(d); + col += d * gradient(h + t, c1, c2, c3, c4); + } + + return vec4(col, 1.0); +} + +// Helpers: +fn rand(x: f32, s: i32) -> f32 { + return fract(sin(x + f32(s)) * 43758.5453123); +} + + +//NOTE: wgsl doesn't support function overloading -- I know not whether or not there's a convention developing yet, so for now I've just enumerated them. +fn rand_overload_1(x: f32) -> f32 { + return rand(x, 0); +} + +fn rand_overload_2(uv: vec2f, seed: i32) -> f32 { + return fract(sin(dot(uv.xy, vec2f(12.9898, 78.233)) + f32(seed)) * 43758.5453123); +} + +fn rand_overload_3(uv: vec2f) -> f32 { + return rand_overload_2(uv, 0); +} + +fn noise(x: f32, s: i32) -> f32 { + let xi = floor(x); + let xf = fract(x); + return mix(rand(xi, s), rand(xi + 1.0, s), smoothstep(0.0, 1.0, xf)); +} + +fn noise_overload_1(x: f32) -> f32 { + return noise(x, 0); +} + +fn noise_overload_2(p: vec2f, s: i32) -> f32 { + let pi = floor(p); + let pf = fract(p); + + let bl = rand_overload_2(pi, s); + let br = rand_overload_2(pi + vec2f(1.0, 0.0), s); + let tl = rand_overload_2(pi + vec2f(0.0, 1.0), s); + let tr = rand_overload_2(pi + vec2f(1.0), s); + + let w = smoothstep(vec2f(0.0), vec2f(1.0), pf); + + let t = mix(tl, tr, w.x); + let b = mix(bl, br, w.x); + + return mix(b, t, w.y); +} + +fn noise_overload_3(p: vec2f) -> f32 { + return noise_overload_2(p, 0); +} + +fn scale(x: f32, a: f32, b: f32, c: f32, d: f32) -> f32 { + return (x - a) / (b - a) * (d - c) + c; +} + +fn gradient(t: f32, a: vec3f, b: vec3f, c: vec3f, d: vec3f) -> vec3f { + return a + b * cos(TWO_PI * (c * t + d)); +} + +``` + +### assets/shaders/shadertoy-ports/cyber-anim-arrowX + +```rust +/// +/// This is a port of CyberAnimArrowX by float1987 +/// Source: https://www.shadertoy.com/view/DsjfDt +/// Authour: https://www.shadertoy.com/user/float1987 +/// +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common rotate2D, QUARTER_PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 0.2; //Global Speed multiplier +const NUM_ARROWS = 3.0; // Number of arrows spawned (see the for-loop below) +const COLOUR_TEMP = 0.02; // The 'intensity' of the red channel in the arrows. + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; // | + let resolution = view.viewport.zw; // | + uv.x *= resolution.x / resolution.y; // normalising uvs. + + let t = globals.time * SPEED; + + uv *= rotate2D(t); // Play with the time to adjust the speed at which the arrows rotate, or commen out to prevent spin entirely. + // what happens is you put the negative -QUARTER_PI in here? + + return cyber_anim_arror_x(uv, t); +} + +fn cyber_anim_arror_x(uv: vec2f, t: f32) -> vec4f { + var out = vec3f(0.0); + + for (var i: f32 = 0.0; i < NUM_ARROWS; i += 1.0) { + // HOMEWORK IDEA 1: + // there's several colour pallete creators in this codebase, kishimisu, shaderToyDefault etc (grep for them), + // maybe you can add your own colour and multiply the sdf_arrow by that? 
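+        // For example (hypothetical, assuming you copy the cosine `palette` helper over from
+        // grid_with_colours): out += draw_arrow(uv, i) * palette(t + i / NUM_ARROWS);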
+ out += draw_arrow(uv, i) * vec3f(COLOUR_TEMP, abs(cos(t)), abs(sin(t) * cos(t))); + + // HOMEWORK IDEA 2: + // the dradraw_arrow() function is really just an sdf shape, maybe you can swap it out with some others https://gist.github.com/munrocket/30e645d584b5300ee69295e54674b3e4#bobbly-cross---exact + } + + return vec4f(out, 1.0); +} + +/// Draws an sdf_arrow, by manipulating a square +fn draw_arrow(uv: vec2f, offset: f32) -> f32 { + var uv = uv; + var sign_x = sign(uv.x); + + uv.y = abs(uv.y); + uv.x += sign_x * (uv.y - fract(globals.time) + offset); // Comment this out and you get a square. + + var a = QUARTER_PI;// There are more constants in the common.wgsl -- try some others! + uv *= rotate2D(a); // rotating our uvs by angle 'a', naming your 'angles' a1, a2, a3 etc seems to be very common. + + var t1 = smoothstep(0.3, 0.29, abs(uv.x) + abs(uv.y)); + var t2 = smoothstep(0.29, 0.28, abs(uv.x) + abs(uv.y)); + var t = step(0.1, t1 - t2); + + return t; +} + + +``` + +### assets/shaders/shadertoy-ports/cosmic + +```rust +/// A shadertoy port of 'Cosmic' https://www.shadertoy.com/view/msjXRK, by Xor. +/// I have sligthly adjusted the colours, and used a smoothstep to improve the contrast too. +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, rotate2D + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv *2.0) - 1.0; + uv *= 220.; // equivalent of zooming out. + uv *= rotate2D(NEG_HALF_PI); + let resolution = view.viewport.zw; + uv.x *= resolution.x / resolution.y; + + let col = cosmic(uv, resolution); + + // I like the bumped contrast to enhance the glow. + // I also like the glow being strongest on the red + let contrast_bumped = vec4f( + smoothstep(0.10, 0.95, col.r), + smoothstep(0.10, 0.85, col.g), + smoothstep(0.10, 0.85, col.b), + 1.0// Assuming you want to keep the alpha channel unchanged + ); + return contrast_bumped; + + +} + +// Generates a visual effect based on pixel coordinates and time +fn cosmic(uv: vec2f, r: vec2f) -> vec4f { + var p: vec2f = uv * mat2x2(vec2(1.0, -1.0), vec2(1.3, 5.0)); + + var col: vec4f = vec4f(0.0, 0.0, 0.0, 0.0); + + for (var i: f32 = 0.0; i < 30.0; i = i + 1.0) { + var tmp_uv: vec2f = p / -(r + r - p).y; + var a: f32 = atan2(tmp_uv.y, tmp_uv.x) * ceil(i * 0.1) + globals.time * sin(i * i) + i * i; + col += 0.2 / (abs(length(tmp_uv) * 80.0 - i) + 40.0 / r.y) * + clamp(cos(a), 0.0, 0.6) * + (cos(a - i + vec4f(0.3, 2.0, 2.8, 0.0)) + 1.0); + } + + return col; +} + +``` + +### assets/shaders/shadertoy-ports/voronoi_simple + +```rust +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::forward_io::VertexOutput +// +// Two simple voronoi shaders from https://www.youtube.com/watch?v=l-07BXzNdPw&t=19s&ab_channel=TheArtofCode +// Ported here to wgsl, I've tried to use the same varnames etc so you can benefit from Martien's fantastic videos. +// + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // just comment in/out the one you want to see and save this file! 
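+    // (non_simple tiles the UVs into a grid of animated cells and checks neighbouring cells;
+    // simple_non_uniform brute-forces the minimum distance to 200 moving points.)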
+ + return non_simple(in); + // return simple_non_uniform(in); +} + +fn non_simple(in: VertexOutput) -> vec4{ + var uv: vec2 = in.uv; + var m = 0.; + let t = globals.time / 10.0; + + var min_dist = 100.0; + var col = vec3(0.0); + var d = 0.; + + uv *= 5.0; + let gv = fract(uv); + let id = floor(uv); + var cell_id = vec2(0.); + + for (var i = -1.; i<1.; i+=1.){ + for (var j = -1.; j<1.; j+=1.){ + var offset = vec2(i,j); + let n = N22(id + offset); + var p = sin(n*t * 3.0); + + // Use Euclidian distance: + let ed = length(gv - p); + + // //Using Manhattan distance: + p -= gv; + var md = length(gv - p); + md = abs(p.x)+abs(p.y); + + // // Interprolate between the euc and manha: + d = mix(ed, md, sin(t *.2)); + + if d < min_dist{ + min_dist = d; + offset = offset + id; + cell_id = offset; + } + } + } + col = vec3(min_dist); + + return vec4(col, 1.0); +} + +fn simple_non_uniform(in: VertexOutput) -> vec4 { + let uv: vec2 = in.uv; + + var m = 0.; + let t = globals.time / 10.0; + var min_dist = 100.; + var cell_idx = 0; + + for (var i = 0; i < 200; i += 1) { + let n = N22(vec2(f32(i))); + let p = sin(n*t*3.0); + let d = length(uv - p); + + if d < min_dist { + min_dist = d; + cell_idx = i; + } + } + + var col = vec3(f32(cell_idx)/200.0); + return vec4(col, 1.0); +} + + +// Noise: two in -> two out in range [0..1] +fn N22(pp: vec2)->vec2{ + var a = fract(pp.xyx*vec3(123.34, 234.34, 345.65)); + a += dot(a, a + 34.45); + return fract(vec2(a.x*a.y, a.y*a.z)); +} + +``` + +### assets/shaders/shadertoy-ports/flame + +```rust +/// ***************************** /// +/// This is a port of 'Flame' by XT95 https://www.shadertoy.com/view/MdX3zr +/// ***************************** /// + +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common rotate2D, PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + uv *= rotate2D(PI); + let org: vec3 = vec3(0.0, -2.0, 4.0); + let dir: vec3 = normalize(vec3(uv.x * 1.6, -uv.y, -1.5)); + let p: vec4 = raymarch(org, dir); + let glow: f32 = p.w; + let col: vec4 = mix(vec4(1.0, 0.25, 0.01, 1.0), vec4(0.07, 0.2, 0.8, 1.0), p.y * 0.02 + 0.4); + return mix(vec4(0.0, 0.0, 0.0, 0.0), col, pow(glow * 2.0, 4.0)); +} + +// Computes a noise value based on a 3D position +fn noise(_p: vec3) -> f32 { + var p = _p; + let i: vec3 = floor(p); + let a: vec4 = dot(i, vec3(1.0, 57.0, 21.0)) + vec4(0.0, 57.0, 21.0, 78.0); + let f: vec3 = cos((p - i) * acos(-1.0)) * (-0.5) + 0.5; + let a_val: vec4 = mix(sin(cos(a) * a), sin(cos(1.0 + a) * (1.0 + a)), f.x); + let a_xy: vec2 = mix(vec2(a_val.x, a_val.z), vec2(a_val.y, a_val.w), f.y); + return mix(a_xy.x, a_xy.y, f.z); +} + +// Computes the distance from a point to a sphere +fn sphere(_p: vec3, spr: vec4) -> f32 { + var p = _p; + return length(spr.xyz - p) - spr.w; +} + +// Computes a flame value based on a 3D position +fn flame(_p: vec3) -> f32 { + var p = _p; + let d: f32 = sphere(p * vec3(1.0, 0.25, 1.0), vec4(0.0, -1.0, 0.0, 1.0)); + return d + (noise(p + vec3(0.0, globals.time * 2.0, 0.0)) + noise(p * 3.0) * 0.5) * 0.25 * p.y; +} + +// Computes the distance from a ray to the scene +fn scene(_p: vec3) -> f32 { + var p = _p; + return min(100.0 - length(p), abs(flame(p))); +} + +// Raymarches the scene and returns the hit point and glow value +fn raymarch(org: vec3, dir: vec3) -> vec4 { + var d: f32 = 0.0; + var 
glow: f32 = 0.0; + let eps: f32 = 0.01; + var p: vec3 = org; + var glowed: bool = false; + + for(var i: i32 = 0; i < 96; i = i + 1) { + d = scene(p) + eps; + p += d * dir; + if d > eps { + if flame(p) < 0.0 { + glowed = true; + } + if glowed { + glow = f32(i) / 96.0; + } + } + } + return vec4(p, glow); +} + + +``` + +### assets/shaders/shadertoy-ports/warp + +```rust +/// +/// This is a port of "WARP" by Alro https://www.shadertoy.com/view/ttlGDf +/// +#import bevy_pbr::forward_io::VertexOutput; +#import bevy_sprite::mesh2d_view_bindings globals; +#import bevy_render::view View; + +@group(0) @binding(0) var view: View; + +const STRENGTH: f32 = 0.4; // Controls the strength of the waves +const SPEED: f32 = 0.33333; // Controls the speed at which the waves run + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv: vec2 = (in.uv * 2.0) - 1.0; + let resolution: vec2 = view.viewport.zw; + let time: f32 = globals.time * SPEED; + + return warp_with_aa(uv, resolution, time); +} + +// Without the Anti-Aliasing. +fn warp_without_aa(uv: vec2f, resolution: vec2f, time: f32) -> vec4f { + // Normalized pixel coordinates (from -1 to 1) + var pos: vec2 = uv; + pos.y /= resolution.x / resolution.y; + pos = 4.0 * (vec2(0.5, 0.5) - pos); + + for (var k: f32 = 1.0; k < 7.0; k += 1.0) { + pos.x += STRENGTH * sin(2.0 * time + k * 1.5 * pos.y) + time * 0.5; + pos.y += STRENGTH * cos(2.0 * time + k * 1.5 * pos.x); + } + + // Time varying pixel color + let col: vec3 = 0.5 + 0.5 * cos(time + pos.xyx + vec3(0.0, 2.0, 4.0)); + + // Gamma correction + let gamma_corrected_col: vec3 = pow(col, vec3(0.4545, 0.4545, 0.4545)); + + // Fragment color + return vec4(gamma_corrected_col, 1.0); +} + +fn warp_with_aa(uv: vec2f, resolution: vec2f, time: f32) -> vec4f { + var color: vec3 = vec3(0.0, 0.0, 0.0); + var frag_coord: vec2 = uv * resolution; + + // Anti-aliasing loop + for (var i: i32 = -1; i <= 1; i = i + 1) { + for (var j: i32 = -1; j <= 1; j = j + 1) { + frag_coord = uv * resolution + vec2(f32(i), f32(j)) / 3.0; + + var pos: vec2 = frag_coord / resolution; + pos.y /= resolution.x / resolution.y; + pos = 4.0 * (vec2(0.5, 0.5) - pos); + + for (var k: f32 = 1.0; k < 7.0; k = k + 1.0) { + pos.x += STRENGTH * sin(2.0 * time + k * 1.5 * pos.y) + time * 0.5; + pos.y += STRENGTH * cos(2.0 * time + k * 1.5 * pos.x); + } + + color += 0.5 + 0.5 * cos(time + pos.xyx + vec3(0.0, 2.0, 4.0)); + } + } + + color /= 9.0; + + // Gamma correction + color = pow(color, vec3(0.4545, 0.4545, 0.4545)); + + return vec4(color, 1.0); +} +``` + +### assets/shaders/shadertoy-ports/BROKEN_fbm_lightning + +```rust +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::forward_io::VertexOutput + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let uv: vec2 = in.uv; + return fbm_lightning(uv); +} + +// Cover of https://www.shadertoy.com/view/dsXfDn +fn fbm_lightning(uv: vec2) -> vec4 { + // Make the centre of our cube == 0,0 + var uv = ((uv.xy) * 2.0) - 1.5; + // uv.y += fract(h11(globals.time)); + + var time: f32 = globals.time; + + uv += fbm(uv - 0.004 * time, 2) ; + + var dist = abs(uv.x) * 18.0; // needs to be abs so that the glow goes in both directions. 
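+    // The bolt's brightness falls off as (k / dist)^glow below, so raising `glow`
+    // tightens the core of the lightning while h11(time) flickers its intensity.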
+ var glow = 18.4; + var col = vec3(0.3, 0.6, 0.8) * pow(mix(0.0, 0.08, h11(time)) / dist, glow); + + return vec4(col, 1.0); +} + +/// Hash: one in -> one out +fn h11(p: f32) -> f32 { + var p = fract(p * .1031); + p *= p + 33.33; + p *= p + p; + return fract(p); +} + +/// Hash: two in -> one out +fn h12(pp: vec2) -> f32 { + var p3 = fract(vec3(pp.xyx) * 0.1031); + p3 += dot(p3, p3.yzx + 33.33); + return fract((p3.x + p3.y) * p3.z); +} + +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, - s, s, c); +} +/// Nosie: two in -> one out +fn noise21(pp: vec2) -> f32 { + let ip = floor(pp); + let fp = fract(pp); + + let a = h12(ip); + let b = h12(ip + vec2(1., 0.)); + let c = h12(ip + vec2(0., 1.)); + let d = h12(ip + vec2(1., 1.)); + + let t = smoothstep(vec2(0.0), vec2(1.0), fp); + + return mix(mix(a, b, t.x), mix(c, d, t.x), t.y); +} + +fn fbm(pp: vec2, octave_count: i32) -> f32 { + var value = 0.0; + var pp = pp; + var amp = 0.5; + + for (var i = 0; i < octave_count; i += 1) { + value += amp * noise21(pp); + pp *= rotate2D(h11(0.321)); + pp *= pp; + amp *= 0.33333333; + } + + return value; +} +``` + +### assets/shaders/shadertoy-ports/universe_within + +```rust +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals + + +/// This is a cover/port of https://www.youtube.com/watch?v=KGJUl8Teipk&t=631s&ab_channel=TheArtofCode +// Note: it's missing the texel and mouse stuff because I've not made that available yet. +// Magics are (in the functions at least) kept basically as Martien had them, but those in main are adjusted somewhat to make more sense here. +const NUM_LAYERS: f32 = 5.0; +const SPEED: f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + + var col = vec3f(0.0); + col = shader_toy_default(t, uv); + + return vec4(col, 1.0); +} + +/// This is the default (and rather pretty) shader you start with in ShaderToy +fn shader_toy_default(t: f32, uv: vec2f) -> vec3f { + var col = vec3f(0.0); + let v = vec3(t) + vec3(uv.xyx) + vec3(0., 2., 4.); + return 0.5 + 0.5 * cos(v); +} + + + +``` + +### assets/shaders/shadertoy-ports/kishimisu + +```rust +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::forward_io::VertexOutput + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return kishimisu(in); +} + +// This is a port/cover of Kimishisu's awesome YT tutotial: https://www.youtube.com/watch?v=f4s1h2YETNY +fn kishimisu(in: VertexOutput) -> vec4 { + let uv0 = ((in.uv.xy) * 2.0) - 1.0; + var uv = (in.uv.xy) ; + + var output = vec3(0.0); + + for (var i = 0.0; i < 1.0; i += 1.0) { + uv = fract((uv * .0982)) - 1.225; + + var d = length(uv) * exp(-length(uv0)); + + var col = palette(length(uv0) + (i * 4.3) + (globals.time * .4)); + + d = sin(d * 8. 
+ globals.time) / 4.; + d = abs(d); + + d = pow(0.01 / d, 1.8); + + output += col * d; + } + + return vec4(output, 1.0); +} + +fn palette(t: f32) -> vec3 { + let a = vec3(0.5, 0.5, 0.5); + let b = vec3(0.5, 0.5, 0.5); + let c = vec3(1.0, 1.0, 1.0); + let d = vec3(0.263, 0.416, 0.557); + + return a + b * cos(6.28318 * (c * t + d)); +} + +``` + +### assets/shaders/shadertoy-ports/semi-circle-wave + +```rust +// +// This is a port of the Semi-circle Wave Animation by Shane https://www.shadertoy.com/view/cdycRt +// +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, rotate2D, HALF_PI, PI, TAU + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 2.0; +const NUM_ITERATIONS: f32 = 14.0; +const LINE_GIRTH:f32 = 0.4; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let time = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + + let col = radial_arc_pattern(uv, resolution, time); + return col; +} + +// @param resolution - The screen resolution +// @param time - The current time for animation +// @return The computed color for the fragment +fn radial_arc_pattern(_uv: vec2f, resolution: vec2f, time: f32) -> vec4 { + var uv = _uv; + uv.y -= 1.0; // Move the Y axis down so the circle comes outta the floor, not halfway on the screen. + uv.y *= -1.0; + + // Convert to polar coordinates + var p: vec2f = vec2f(atan2(uv.y, uv.x) / TAU, length(uv)); + + // Constants for radial repetition + let l_num: f32 = 12.0; // The number of lines + p.y = clamp(p.y, 0.0, 1.0); + let iy: f32 = floor(p.y * l_num); + p.y -= (iy + 0.5) / l_num; + + // Calculate the restricted arc rendering angle + let ang: f32 = (sin(time + iy * PI / l_num * HALF_PI) * 0.9 + 1.0) / 4.0; + + var d: f32; + if p.x < ang { + d = 1e5; + } else { + d = abs(p.y) - LINE_GIRTH / l_num; + } + + // Add rounded line ends + var ang2: f32 = (PI + ang) * TAU; + if uv.x < 0.0 && uv.y < 0.0 { + ang2 = NEG_HALF_PI; + } + let c: f32 = cos(ang2); + let s: f32 = sin(ang2); + p = vec2f(c * uv.x + s * uv.y, -s * uv.x + c * uv.y); + p.y = clamp(p.y, 0.0, 1.0); + let iy2: f32 = floor(p.y * l_num); + p.y -= (iy2 + 0.5) / l_num; + d = min(d, length(p) - 0.2 / l_num); + + // Assign colors to individual arcs + let s_col: vec3 = 0.5 + 0.5 * cos(TAU * iy / l_num * 0.8 + vec3(0.0, 2.0, 4.0) + 2.0); + let col: vec3 = mix(vec3(0.0), s_col, 1.0 - smoothstep(0.0, 3.0 / resolution.y, d)); + + // Apply rough gamma correction and return the final color + return vec4(sqrt(col), 1.0); // Remove the sqrt() to see a more 'saturated' image. +} + +``` + +### assets/shaders/shadertoy-ports/star + +```rust +#import bevy_pbr::mesh_view_bindings globals view +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI HALF_PI +#import bevy_pbr::mesh_functions + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let t = globals.time; + var uv = in.uv - 0.5; + var col = vec3(0.0); + + return vec4(col, 1.0); +} + +// I disklike boring colours, this paticular function comes from Kishimisu (see the wgsl file of same name to explore more of her/his/their ideas.) 
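+// This is the classic cosine palette popularised by Inigo Quilez: a + b * cos(6.28318 * (c * t + d)),
+// where `a` is the base colour, `b` the amplitude, `c` the frequency and `d` the per-channel
+// phase offset -- tweak `d` to shift the hues.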
+fn palette(t: f32) -> vec3 { + let a = vec3(0.5, 0.5, 0.5); + let b = vec3(0.5, 0.5, 0.5); + let c = vec3(1.0, 1.0, 1.0); + let d = vec3(0.263, 0.416, 0.557); + + return a + b * cos(6.28318 * (c * t + d)); +} + +// courtesy of : https://gist.github.com/munrocket/30e645d584b5300ee69295e54674b3e4 +fn sd_star(p: vec2, r: f32, n: i32, m: f32) -> f32 { + let an = 3.141593 / f32(n); + let en = 3.141593 / m; + + let acs = vec2(cos(an), sin(an)); + let ecs = vec2(cos(en), sin(en)); + + let bn = (atan2(abs(p.x), p.y) % (2. * an)) - an; + var q: vec2 = length(p) * vec2(cos(bn), abs(sin(bn))); + + q = q - r * acs; + q = q + ecs * clamp(-dot(q, ecs), 0., r * acs.y / ecs.y); + + return length(q) * sign(q.x); +} + +// License: WTFPL, author: sam hocevar, found: https://stackoverflow.com/a/17897228/418488 +fn hsv2rgb(c: vec3) -> vec3 { + let K: vec4 = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + var p: vec3 = abs(fract(vec3(c.x) + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} + +``` + +### assets/shaders/shadertoy-ports/water-caustic-tileable + +```rust +/// ***************************** /// +/// This is a shadertoy port of 'Tileable Water Caustic' by Dave_Hoskins, who claims to of sound it on glsl sandbox, by 'joltz0r' +/// I have been unable to find the original. +/// ***************************** /// + +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D, TAU + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const MAX_ITER: i32 = 3; +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let time: f32 = globals.time * 0.5 + 23.0; + var uv: vec2 = in.uv; + + // Tiling calculation + var p: vec2; + // Note: Choose one of the following two lines based on whether SHOW_TILING is defined or not + // p = uv * TAU * 2.0 % TAU - 250.0; // show TILING + p = uv * TAU % TAU - 250.0; // hide TILING + + var i: vec2 = vec2(p); + var c: f32 = 1.0; + let inten: f32 = 0.005; + + for (var n: i32 = 0; n < MAX_ITER; n = n + 1) { + let t: f32 = time * (1.0 - (3.5 / f32(n + 1))); + i = p + vec2(cos(t - i.x) + sin(t + i.y), sin(t - i.y) + cos(t + i.x)); + c += 1.0 / length(vec2(p.x / (sin(i.x + t) / inten), p.y / (cos(i.y + t) / inten))); + } + + c /= f32(MAX_ITER); + c = 1.17 - pow(c, 1.4); + var colour: vec3 = vec3(pow(abs(c), 8.0)); + colour = clamp(colour + vec3(0.0, 0.35, 0.5), vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)); + + + // Show grid: + // let pixel: vec2 = vec2(2.0) / view.viewport.zw; + // uv *= 2.0; + // let f: f32 = floor(globals.time * 0.5 % 2.0); + // let first: vec2 = step(pixel, uv) * f; + // uv = step(fract(uv), pixel); + // colour = mix(colour, vec3(1.0, 1.0, 0.0), (uv.x + uv.y) * first.x * first.y); + + return vec4(colour, 1.0); +} + +``` + +### assets/shaders/shadertoy-ports/sailing-beyond + +```rust +/// ***************************** /// +/// This is a port of 'Sailing beyond' by patu https://www.shadertoy.com/view/4t2cR1 +/// ***************************** /// + +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common rotate2D, PI, TAU + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const FAR: f32 = 1e3; +const INFINITY: f32 = 1e32; +const MAX_ITERATIONS: i32 = 1000; +const FOV: f32 = 38.0; +const FOG: f32 = 0.6; +const PHI: f32 = 1.618033988749895; + +struct Geometry 
{ + dist: f32, + hit: vec3, + iterations: i32, +} + +// Converts degrees to radians +fn d2r(angle: f32) -> f32 { + return angle * PI / 180.0; +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let resolution = view.viewport.zw; + var uv = (in.uv * 2.0) - 1.0; + + uv *= tan(d2r(FOV) / 2.0) * 4.0; + + let vuv: vec3 = normalize(vec3(cos(globals.time), sin(globals.time * 0.11), sin(globals.time * 0.41))); // up + var ro: vec3 = vec3(0.0, 30.0 + globals.time * 100.0, -0.1); + + ro.x += y_c(ro.y * 0.1) * 3.0; + ro.z -= y_c(ro.y * 0.01) * 4.0; + + var vrp: vec3 = vec3(0.0, 50.0 + globals.time * 100.0, 2.0); + + vrp.x += y_c(vrp.y * 0.1) * 3.0; + vrp.z -= y_c(vrp.y * 0.01) * 4.0; + + let vpn: vec3 = normalize(vrp - ro); + let u: vec3 = normalize(cross(vuv, vpn)); + let v: vec3 = cross(vpn, u); + let vcv: vec3 = ro + vpn; + let scr_coord: vec3 = vcv + uv.x * u * resolution.x / resolution.y + uv.y * v; + let rd: vec3 = normalize(scr_coord - ro); + let oro: vec3 = ro; + + var scene_color: vec3 = vec3(0.0); + + var tr: Geometry = trace(ro, rd); + + tr.hit = ro + rd * tr.dist; + + var col: vec3 = vec3(1.0, 0.5, 0.4) * fbm(tr.hit.xzy * 0.01) * 20.0; + col.b *= fbm(tr.hit * 0.01) * 10.0; + + scene_color += min(0.8, f32(tr.iterations) / 90.0) * col + col * 0.03; + scene_color *= 1.0 + 0.9 * (abs(fbm(tr.hit * 0.002 + 3.0) * 10.0) * fbm(vec3(0.0, 0.0, globals.time * 0.05) * 2.0)) * 1.0; + scene_color = pow(scene_color, vec3(1.0)) * 0.6; // Adjusted the i_channel_time logic + + var steam_color1: vec3 = vec3(0.0, 0.4, 0.5); + var rro: vec3 = oro; + + ro = tr.hit; + + var dist_c: f32 = tr.dist; + var f: f32 = 0.0; + let st: f32 = 0.9; + + for (var i: i32 = 0; i < 24; i = i + 1) { + rro = ro - rd * dist_c; + f += fbm(rro * vec3(0.1, 0.1, 0.1) * 0.3) * 0.1; + dist_c -= 3.0; + if (dist_c < 3.0) { + break; + } + } + + steam_color1 *= 1.0; + scene_color += steam_color1 * pow(abs(f * 1.5), 3.0) * 4.0; + + var frag_color: vec4 = vec4(clamp(scene_color * (1.0 - length(uv) / 2.0), vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)), 1.0); + frag_color = pow(abs(frag_color / tr.dist * 130.0), vec4(0.8)); + return frag_color; +} + +// Hash 2 into 1 +fn hash12(p: vec2) -> f32 { + let h: f32 = dot(p, vec2(127.1, 311.7)); + return fract(sin(h) * 43758.5453123); +} + +// 3D noise function +fn noise_3(p: vec3) -> f32 { + let i: vec3 = floor(p); + var f: vec3 = fract(p); + f -= vec3(1.0, 1.0, 1.0); // Decrement each component of the vector by 1 + var u: vec3 = 1.0 - f * f * f * f * -f; + + let ii: vec2 = i.xy + i.z * vec2(5.0, 5.0); + let a: f32 = hash12(ii + vec2(0.0, 0.0)); + let b: f32 = hash12(ii + vec2(1.0, 0.0)); + let c: f32 = hash12(ii + vec2(0.0, 1.0)); + let d: f32 = hash12(ii + vec2(1.0, 1.0)); + let v1: f32 = mix(mix(a, b, u.x), mix(c, d, u.x), u.y); + + let new_ii: vec2 = ii + vec2(5.0, 5.0); + let a_new: f32 = hash12(new_ii + vec2(0.0, 0.0)); + let b_new: f32 = hash12(new_ii + vec2(1.0, 0.0)); + let c_new: f32 = hash12(new_ii + vec2(0.0, 1.0)); + let d_new: f32 = hash12(new_ii + vec2(1.0, 1.0)); + let v2: f32 = mix(mix(a_new, b_new, u.x), mix(c_new, d_new, u.x), u.y); + + return max(mix(v1, v2, u.z), 0.0); +} + +// Computes the Fractional Brownian Motion value +fn fbm(position: vec3) -> f32 { + var result: f32 = 0.0; + var weight: f32 = 1.0; + var scale: f32 = 1.0; + for (var i: i32 = 0; i < 4; i = i + 1) { + weight *= 0.25; + scale *= 3.0; + result += weight * noise_3(scale * position); + } + return result; +} + +// Computes the y-coordinate based on x +fn y_c(x: f32) -> f32 { + let cosine_val: f32 = 
cos(x * -0.134); + let sine_val: f32 = sin(x * 0.13); + let fbm_val: f32 = fbm(vec3(x * 0.1, 0.0, 0.0) * 55.4); + return cosine_val * 1.0 * sine_val * 15.0 + fbm_val; +} + +// Rotates a 2D point by an angle +fn p_r(out_point: vec2, angle: f32) -> vec2f { + var point = out_point; + point = cos(angle) * point + sin(angle) * vec2(point.y, -point.x); + return point; +} + +// Computes the distance to an infinite cylinder +fn f_cylinder_inf(p: vec3, r: f32) -> f32 { + return length(vec2(p.x, p.z)) - r; +} + +// Maps the geometry based on the input position +fn map(p: vec3) -> Geometry { + var position = p; + position.x -= y_c(position.y * 0.1) * 3.0; + position.z += y_c(position.y * 0.01) * 4.0; + + let noise_val: f32 = pow(abs(fbm(position * 0.06)) * 12.0, 1.3); + let s: f32 = fbm(position * 0.01 + vec3(0.0, globals.time * 0.14, 0.0)) * 128.0; + + var obj: Geometry; + + obj.dist = max(0.0, -f_cylinder_inf(position, s + 18.0 - noise_val)); + + position.x -= sin(position.y * 0.02) * 34.0 + cos(position.z * 0.01) * 62.0; + + obj.dist = max(obj.dist, -f_cylinder_inf(position, s + 28.0 + noise_val * 2.0)); + + return obj; +} + +// Traces the geometry based on the input origin and direction +fn trace(o: vec3, d: vec3) -> Geometry { + let t_min: f32 = 10.0; + let t_max: f32 = FAR; + var omega: f32 = 1.3; + var t: f32 = t_min; + var candidate_error: f32 = INFINITY; + var candidate_t: f32 = t_min; + var previous_radius: f32 = 0.0; + var step_length: f32 = 0.0; + let pixel_radius: f32 = 1.0 / 1000.0; + + var mp: Geometry = map(o); + + var function_sign: f32; + if mp.dist < 0.0 { + function_sign = -1.0; + } else { + function_sign = 1.0; + }; + + var min_dist: f32 = FAR; + + for (var i: i32 = 0; i < MAX_ITERATIONS; i = i + 1) { + mp = map(d * t + o); + mp.iterations = i; + + let signed_radius: f32 = function_sign * mp.dist; + let radius: f32 = abs(signed_radius); + let sor_fail: bool = omega > 1.0 && (radius + previous_radius) < step_length; + + if sor_fail { + step_length -= omega * step_length; + omega = 1.0; + } else { + step_length = signed_radius * omega; + } + previous_radius = radius; + let error: f32 = radius / t; + + if !sor_fail && error < candidate_error { + candidate_t = t; + candidate_error = error; + } + + if !sor_fail && error < pixel_radius || t > t_max { + break; + } + + t += step_length * 0.5; + } + + mp.dist = candidate_t; + + if t > t_max || candidate_error > pixel_radius { + mp.dist = INFINITY; + } + + return mp; +} + +``` + +### assets/shaders/shadertoy-ports/BROKEN_tuesday_tinkering + +```rust +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_render::view View + +@group(0) @binding(0) var view: View; + +const HEIGHT:f32 = 4.0; +const INTENSITY:f32 = 5.0; +const NUM_LINES:f32 = 4.0; +const SPEED:f32 = 1.0; +const TAU: f32 = 6.283185; +const GA: f32 = 100.0; +const ICONST: i32 = 2; + + +// This is a port of "Tuesday tinkering" https://www.shadertoy.com/view/DsccRS by mrange https://www.shadertoy.com/user/mrange +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let uv = (in.uv * 2.0) - 1.0; + var col = vec3f(0.0); + let resolution = view.viewport.xy; + let time = globals.time; + + let off6: array, 6> = array, 6>( // How can you loop over arrays? 
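+        // (One workaround worth trying: a `let`-bound array can typically only be indexed with
+        //  constant indices in current naga/WGSL -- which is why ICONST exists below -- whereas
+        //  declaring the array with `var` usually allows indexing it with the loop counter.)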
+ vec2(off6(0.)), + vec2(off6(1.)), + vec2(off6(2.)), + vec2(off6(3.)), + vec2(off6(4.)), + vec2(off6(5.)) + ); + + let noff6: array, 6> = array, 6>( + vec2(-1.0, 0.0), + vec2(-0.5, 0.5), + vec2(0.5, 0.5), + vec2(1.0, 0.0), + vec2(0.5, -0.5), + vec2(-0.5, -0.5) + ); + + + + let q: vec2 = uv / resolution.xy; + var p: vec2 = -1.0 + 2.0 * q; + let pp: vec2 = p; + p.x *= resolution.x / resolution.y; + let aa: f32 = 4.0 / resolution.y; + + var hp: vec2 = p; + hp *= 3.0; + hp += GA * sin(vec2(1.0, sqrt(0.5)) * TAU * (time - 300.0) / (8.0 * GA)); + + let hn: vec2 = hextile(hp); + let h0: f32 = hash(hn); + var p0: vec2 = coff(h0, time); + let bcol: vec3 = 0.5 * (1.0 + cos(vec3(0.0, 1.0, 2.0) + dot(p, p) - 0.5 * time)); + + let mx: f32 = 0.0005; + + // Loop through the off6 array + for (var i = 0; i <= 6; i ++) { + let i = i; + let v:vec2f= hn + noff6[ICONST]; // FIXME: this is fucking stupid you can only index by const... + let h1: f32 = hash(v); + + let p1: vec2 = off6[ICONST] + coff(h1, time); + + let fade: f32 = smoothstep(1.05, 0.85, distance(p0, p1)); + + let h2: f32 = h0 + h1; + let p2: vec2 = 0.5 * (p1 + p0) + coff(h2, time); + let dd: f32 = sd_bezier(hp, p0, p2, p1).x; //FIXME: this is not what the original does. + var gd: f32 = abs(dd); + gd *= sqrt(gd); + gd = max(gd, mx); + col = col + fade * 0.002 * bcol / (gd); + } + + // Calculate additional color contribution based on distance + var cd: f32 = length(hp - p0); + var gd2: f32 = abs(cd); + gd2 = gd2*gd2; + gd2 = max(gd2, mx); + col += 0.0025 * sqrt(bcol) / gd2; + + // Calculate additional color contribution based on hexagon pattern + let hd: f32 = sd_hexagon(hp, 0.485); + gd2 = abs(hd); + gd2 = max(gd2, mx * 10.0); + col += 0.0005 * bcol * bcol / gd2; + + // Apply smoothing based on length + col *= smoothstep(1.75, 0.5, length(pp)); + + // Apply ACES tone mapping and gamma correction + col = aces_approx(col); + col = sqrt(col); + + // return something.. + return vec4f(col, 1.0); +} + + +// License: MIT, author: Inigo Quilez, found: https://iquilezles.org/www/articles/distfunctions2d/distfunctions2d.htm +fn sd_hexagon(p: vec2f, r: f32) -> f32 { + let k = vec3f(-0.866025404, 0.5, 0.577350269); + var q: vec2f = abs(p); + q = q - 2. * min(dot(k.xy, q), 0.) * k.xy; + q = q - vec2f(clamp(q.x, -k.z * r, k.z * r), r); + return length(q) * sign(q.y); +} + +// Translate the GLSL hextile function to WGSL +fn hextile(p: vec2f) -> vec2f { + // See Art of Code: Hexagonal Tiling Explained! + // https://www.youtube.com/watch?v=VmrIDyYiJBA + var p = p; + + // Define constants + let sz: vec2f = vec2f(1.0, sqrt(3.0)); + let hsz: vec2f = 0.5 * sz; + + // Calculate p1 and p2 + let p1: vec2f = (p % sz) - hsz; + let p2: vec2f = ((p - hsz) % sz) - hsz; + + // Choose p3 based on dot product + var p3: vec2f = vec2(0.); + if dot(p1, p1) < dot(p2, p2) { + p3 = p1; + } else { + p3 = p2; + } + + // Calculate n + var n: vec2f = ((p3 - p + hsz) / sz); + p = p3; + + // Adjust n and round for well-behaved hextile 0,0 + n -= vec2(0.5); + return round(n * 2.0) * 0.5; +} + +// NOTE: swapped the hash +// fn hash(pp: vec2) -> f32 { //NOTE: from some other tutorial/bevy code? 
+// var p3 = fract(vec3(pp.xyx) * 0.1031); +// p3 += dot(p3, p3.yzx + 33.33); +// return fract((p3.x + p3.y) * p3.z); +// } +fn hash(co: vec2f) -> f32 { + // Add a constant + let co: vec2f = co + 1.234; + + // Calculate and return the fractal part of a sine function + return fract(sin(dot(co.xy, vec2f(12.9898, 58.233))) * 13758.5453); +} + + +fn off6(n: f32) -> vec2 { + return vec2(1.0, 0.0) * rotate2D(n * TAU / 6.0); +} + +fn sd_bezier(p: vec2f, A: vec2f, B: vec2f, C: vec2f) -> vec2f { + let a = B - A; + let b = A - 2. * B + C; + let c = a * 2.; + let d = A - p; + let kk = 1. / dot(b, b); + let kx = kk * dot(a, b); + let ky = kk * (2. * dot(a, a) + dot(d, b)) / 3.; + let kz = kk * dot(d, a); + + let p1 = ky - kx * kx; + let p3 = p1 * p1 * p1; + let q = kx * (2.0 * kx * kx - 3.0 * ky) + kz; + var h: f32 = q * q + 4. * p3; + + var res: vec2f; + if h >= 0. { + h = sqrt(h); + let x = (vec2f(h, -h) - q) / 2.; + let uv = sign(x) * pow(abs(x), vec2f(1. / 3.)); + let t = clamp(uv.x + uv.y - kx, 0., 1.); + let f = d + (c + b * t) * t; + res = vec2f(dot(f, f), t); + } else { + let z = sqrt(-p1); + let v = acos(q / (p1 * z * 2.)) / 3.; + let m = cos(v); + let n = sin(v) * 1.732050808; + let t = clamp(vec2f(m + m, -n - m) * z - kx, vec2f(0.0), vec2f(1.0)); + let f = d + (c + b * t.x) * t.x; + var dis: f32 = dot(f, f); + res = vec2f(dis, t.x); + + let g = d + (c + b * t.y) * t.y; + dis = dot(g, g); + res = select(res, vec2f(dis, t.y), dis < res.x); + } + res.x = sqrt(res.x); + return res; +} + + +fn coff(h: f32, time: f32) -> vec2 { + let h0: f32 = h; + let h1: f32 = fract(h0 * 9677.0); + let h2: f32 = fract(h0 * 8677.0); + let t: f32 = mix(0.5, 1.0, h2 * h2) * time + 1234.5 * h0; + return mix(vec2(0.1, 0.1), vec2(0.2, 0.2), h1 * h1) * sin(t * vec2(1.0, sqrt(0.5))); +} + +fn aces_approx(v: vec3) -> vec3 { + var v = max(v, vec3(0.0, 0.0, 0.0)); + v *= 0.6; + let a: f32 = 2.51; + let b: f32 = 0.03; + let c: f32 = 2.43; + let d: f32 = 0.59; + let e: f32 = 0.14; + return clamp((v * (a * v + b)) / (v * (c * v + d) + e), vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)); +} + +fn to_smith(p: vec2) -> vec2 { + let d: f32 = (1.0 - p.x) * (1.0 - p.x) + p.y * p.y; + let x: f32 = (1.0 + p.x) * (1.0 - p.x) - p.y * p.y; + let y: f32 = 2.0 * p.y; + return vec2(x, y) / d; +} + +fn from_smith(p: vec2) -> vec2 { + let d: f32 = (p.x + 1.0) * (p.x + 1.0) + p.y * p.y; + let x: f32 = (p.x + 1.0) * (p.x - 1.0) + p.y * p.y; + let y: f32 = 2.0 * p.y; + return vec2(x, y) / d; +} + + +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); +} + + +fn transform(p: vec2, TIME: f32) -> vec2 { + var p = p * 2.0; + let sp0: vec2 = to_smith(p - vec2(0.0, 0.0)); + let sp1: vec2 = to_smith(p + vec2(1.0) * rotate2D(0.12 * TIME)); + let sp2: vec2 = to_smith(p - vec2(1.0) * rotate2D(0.23 * TIME)); + p = from_smith(sp0 + sp1 - sp2); + return p; +} +``` + +### assets/shaders/shadertoy-ports/discoteq2 + +```rust +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals + +const HEIGHT:f32 = 4.0; +const INTENSITY:f32 = 5.0; +const NUM_LINES:f32 = 4.0; +const SPEED:f32 = 1.0; + +// This is a port of Discoteq2 https://www.shadertoy.com/view/DtXfDr by 'supah' https://www.shadertoy.com/user/supah +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let uv = (in.uv * 2.0) - 1.0; + var col = vec4f(0.0); + + + for (var i = 0.0; i <= NUM_LINES; i += 1.0) { + let t = i / INTENSITY; + col += line(uv, SPEED + t, HEIGHT + t, 
vec3f(0.2 + t * 0.7, 0.2 + t * 0.4, 0.3)); + } + + return col; +} + +fn line(uv: vec2f, speed: f32, height: f32, col: vec3f) -> vec4f { + var uv = uv; + uv.y += smoothstep(1.0, 0.0, abs(uv.x)) * sin(globals.time * speed + uv.x * height) * 0.2; + return vec4(smoothstep(.06 * smoothstep(.2, .9, abs(uv.x)), 0., abs(uv.y) - .004) * col, 1.0) * smoothstep(1., .3, abs(uv.x)); +} + + +``` + +### assets/shaders/shadertoy-ports/polar-coords-experiments + +```rust +//! This is a shadertoy port of 'polar-coordinates-experiments' by toridango https://www.shadertoy.com/view/ttsGz8 +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI +#import bevy_sprite::mesh2d_view_bindings globals + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; + + // Polar coordinates + let pol: vec2 = vec2(atan2(uv.y, uv.x), length(uv)); + let col: vec3 = vec3(globals.time + sin(pol.y), cos(pol.y), sin(2.0 * globals.time + pol.x * globals.time * -0.015) / 1.9); + + let adjusted_pol: vec2 = vec2(pol.x / 5.24 - 0.1 * globals.time + pol.y, pol.y); + let m: f32 = min(fract(adjusted_pol.x * 5.0), fract(1.0 - adjusted_pol.x * 5.0)); + + let f: f32 = smoothstep(0.0, 0.1, m * 0.3 + 0.2 - adjusted_pol.y); + + return vec4(f * col, f); +} + + +``` + +### assets/shaders/shadertoy-ports/shadertoy + +```rust +#define_import_path bevy_pbr::shadbang + +// This is a port of the default shader you get from in www.shadertoy.com/new +fn shadertoy_default(uv: vec2)->vec4{ + var uv = uv; + let t = globals.time; + uv *= 3.1459; + + let temp: vec3 = uv.xyx + vec3(0.0, 2.0, 4.0); + let cos_val: vec3 = cos(globals.time + temp); + let col: vec3 = vec3(0.5) + vec3(0.5) * cos_val; + + return vec4(col, 1.0); +} + +``` + +### assets/shaders/shadertoy-ports/octagon_fun + +```rust +#import bevy_pbr::mesh_view_bindings globals view +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI HALF_PI +#import bevy_pbr::mesh_functions + + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + + let t = globals.time; + var uv = in.uv - 0.5; // Shift UV coordinates to make the center of each 'face', if we don't do this, x=0, y=0 is in the bottom-left of the faces of our cube. + + var col = vec3(0.); + + uv *= 2.5659 - sin(t); // Scale UV coordinates for the desired shape size + + // Create an SDF shape: + let d = sd_octogon(uv, 18.5); // What this really is is a distance from the 'in.uv - 0.5' we slap into 'uv' above. + + // Calculate a step based on the signed distance value + let st = smoothstep(0.0, 0.05, abs(d)); // try removing the abs (absolute makes negative numbers positive, and positive numbers going in will be as they were.) + col = vec3(st); + + // Check if the signed distance value is positive (outside) or negative (inside), + // note whilst we use an absolute value inside the smoothstep, we utilise the fact the distance may be negative here. + if (d > 0.0) { + col *= palette(0.007 ); + } else { + // Shading for points inside the shape + col *= palette(0.4987 * d * t/133.456); + } + + // Return the final color to shade each xy coordinate + return vec4(col, 1.0); +} + +// I disklike boring colours, this paticular function comes from Kishimisu (see the wgsl file of same name to explore more of her/his/their ideas.) 
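+// Same a + b * cos(6.28318 * (c * t + d)) cosine palette as in the kishimisu port above.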
+fn palette(t: f32) -> vec3 { + let a = vec3(0.5, 0.5, 0.5); + let b = vec3(0.5, 0.5, 0.5); + let c = vec3(1.0, 1.0, 1.0); + let d = vec3(0.263, 0.416, 0.557); + + return a + b * cos(6.28318 * (c * t + d)); +} + +// This number is from munrocket +fn sd_octogon(p: vec2, r: f32) -> f32 { + let k = vec3(-0.9238795325, 0.3826834323, 0.4142135623); + var q: vec2 = abs(p); + q = q - 2. * min(dot(vec2(k.x, k.y), q), 0.) * vec2(k.x, k.y); + q = q - 2. * min(dot(vec2(-k.x, k.y), q), 0.) * vec2(-k.x, k.y); + q = q - vec2(clamp(q.x, -k.z * r, k.z * r), r); + return length(q) * sign(q.y); +} +``` + +### assets/shaders/shadertoy-ports/w10 + +```rust +#import bevy_pbr::forward_io::VertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_render::view View +#import bevy_pbr::utils PI + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; +const TAU: f32 = 6.283185; +const HALF_PI:f32 = 1.57079632679; +const NEG_HALF_PI:f32 = -1.57079632679; +const R: f32 = 0.0001; +const REP: i32 = 25; +const WBCOL: vec3f = vec3f(0.5, 0.7, 1.7); +const WBCOL2: vec3f = vec3f(0.15, 0.8, 1.7); + +// this is an attempted port of 'w10' https://www.shadertoy.com/view/lllSR2 by https://www.shadertoy.com/user/gyabo +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var frag_color: vec4 = vec4(0.0080); + var uv = (in.uv.xy * 2.0)- 1.0; + uv.y -= 0.55; // To keep the logo centered. + + let resolution = view.viewport.zw; + let scalefac = vec2f(resolution.x, resolution.y); + + // Loop for REP iterations + for (var count: i32 = 0; count < 2; count = count + 1) { + // Calculate uv coordinates + uv *= 1.4; + uv.x += hash(uv.xy + globals.time + f32(count)) / 512.0; + uv.y += hash(uv.yx + globals.time + f32(count)) / 512.0; + + + // Calculate the direction # I was unable to work out how to do this well, due to not being able to swizzle. + var dir: vec3 = normalize(vec3( + uv.xy * 0.33, + 2.0 + sin(globals.time) * 0.01 + )); + + let scale_factor = vec2f((resolution.x / resolution.y), 1.0); + + // Calculate rotations + var stash = rot(dir.xz, d2r(80.0)); //xz + dir.z = stash.y; + stash = rot(dir.xy, d2r(92.0)); //xy + dir.x = stash.y; + + // Initialize variables + var pos: vec3 = vec3( + -0.1 + sin(globals.time * 0.3) * 0.1, + 2.0 + cos(globals.time * 0.4) * 0.1, + -3.5 + ); + var col: vec3 = vec3(0.0); + var t: f32 = 0.0; + var M: f32 = 1.002; + var bsh: f32 = 0.01; + var dens: f32 = 0.0; + + // First loop, controls the intensity of the backlighting + for (var i: i32 = 0; i < REP * 24; i = i + 1) { + var temp: f32 = map1(pos + dir * t, 0.6); + if temp < 0.2 { + col += WBCOL * 0.005 * dens; + } + t += bsh * M; + bsh *= M; + dens += 0.025; + } + + t = 0.0; + var y: f32 = 0.0; + // Second loop, draws the windows... 
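+        // Unlike the first loop (fixed, geometrically growing steps that accumulate the
+        // backlight glow), this one sphere-traces: each step advances by the distance
+        // `map2` returns and adds a little WBCOL2 whenever the ray grazes a window pane.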
+ for (var i: i32 = 0; i < REP * 50; i = i + 1) { + var temp: f32 = map2(pos + dir * t); + if temp < 0.1 { + col += WBCOL2 * 0.005; + } + t += temp; + y = y + 1.0; + } + + col += ((0.0 + uv.x) * WBCOL2) + (y / (25.0 * 50.0)); + // col += gennoise(vec2(dir.xz), globals.time) * 0.5; + // Tint it blue: + col *= 1.0 - uv.y * 0.28; + col *= vec3(0.25); + // get brigther toward the center + col = pow(col, vec3(0.717)); + + // Add the result to fragColor + frag_color = frag_color + vec4(col, 1.0 / t); + } + + // Divide fragColor by 2.0 + frag_color = frag_color / 2.0; + frag_color.b += 0.7; + + return frag_color; +} + + +// // Rotate 2D vectors around a specified axis +// fn roty(theta: f32, axis: i32) -> mat2x2 { +// let c = cos(theta); +// let s = sin(theta); + +// if (axis == 0) { +// // Rotate around the x-axis +// return mat2x2(1.0, 0.0, 0.0, c, 0.0, -s, 0.0, c); +// } else if (axis == 1) { +// // Rotate around the y-axis +// return mat2x2(c, 0.0, s, 1.0, -s, 0.0, c, 0.0); +// } else { +// // Default to no rotation +// return mat2x2(1.0, 0.0, 0.0, 1.0); +// } +// } + + +fn d2r(x: f32) -> f32 { + return x * PI / 180.0; +} + +// Generates noise based on a 2D vector 'p' +fn gennoise(_p: vec2f, i_time: f32) -> f32 { + var p = _p; + var d: f32 = 0.5; + var h: mat2x2f = mat2x2f( + vec2f(1.6, 1.2), + vec2f(-1.2, 1.6) + ); + + var color: f32 = 0.0; + for (var i: i32 = 0; i < 2; i = i + 1) { + color = color + d * noise(p * 5.0 + i_time); + p = p * h; + d = d / 2.0; + } + return color; +} + + +fn plot(st: vec2f, pct: f32) -> f32 { + let l = pct - 0.02; + let r = pct + 0.02; + + return smoothstep(l, pct, st.y) - smoothstep(pct, r, st.y); +} +// Computes a hash for a 2D vector +fn hash(p: vec2f) -> f32 { + var h: f32 = dot(p, vec2(127.1, 311.7)); + return fract(sin(h) * 458.325421) * 2.0 - 1.0; +} + +// Computes Perlin noise for a 2D vector +fn noise(p: vec2f) -> f32 { + let i: vec2f = floor(p); + var f: vec2f = fract(p); + + f = f * f * (3.0 - 2.0 * f); + + return mix( + mix(hash(i + vec2(0.0, 0.0)), hash(i + vec2(1.0, 0.0)), f.x), + mix(hash(i + vec2(0.0, 1.0)), hash(i + vec2(1.0, 1.0)), f.x), + f.y + ); +} + +// Rotates a 2D vector by an angle +fn rot(_p: vec2f, a: f32) -> vec2f { + var p = _p; + return vec2f( + p.x * cos(a) - p.y * sin(a), + p.x * sin(a) + p.y * cos(a) + ); +} + +// Computes the normalized absolute distance to a rectangular box +fn recta(_p: vec3f, F: vec3f, o: vec3f) -> f32 { + var p = _p; + var R: f32 = 0.0001; + p += o; + var abs_p: vec3f = abs(p); + var max_abs = max(abs_p - F, vec3f(0.0)); + return length(max_abs) - R; +} + + +// Computes the normalized absolute distance to a box +fn by(_p: vec3f, F: f32, o: vec3f) -> f32 { + var p = _p; + p += o; + var R: f32 = 0.0001; + var m: vec2f = p.xy % vec2f(3.0); + var max_abs = max(abs(m) - F, vec2f(0.0)); + return length(max_abs) - R; +} + +// Computes a mapping function +fn map1(p: vec3f, scale: f32) -> f32 { + var G: f32 = 0.50; + var F: f32 = 0.50 * scale; + var t: f32 = nac(p, vec2f(F, F), vec3f(G, G, 0.0)); + t = min(t, nac(p, vec2f(F, F), vec3f(G, -G, 0.0))); + t = min(t, nac(p, vec2f(F, F), vec3f(-G, G, 0.0))); + t = min(t, nac(p, vec2f(F, F), vec3f(-G, -G, 0.0))); + return t; +} + +// Computes the second mapping function +fn map2(p: vec3f) -> f32 { + var t: f32 = map1(p, 0.9); + t = max(t, recta(p, vec3f(1.0, 1.0, 0.02), vec3f(0.0, 0.0, 0.0))); + return t; +} + +// Computes the normalized absolute distance to a box +// defined by its half extents 'F' and an offset 'o' +// from a point 'p' +fn nac(_p: vec3f, F: vec2f, o: 
vec3f) -> f32 { + var p = _p; + p += o; + var R: f32 = 0.0001; + var max_abs = max(abs(p.xy) - F, vec2f(0.0)); + return length(max_abs) - R; +} + +``` + +### assets/Gallery/perlin-waves/perlin-waves + +```rust +/// ***************************** /// +/// This is a port of 'Perlin Waves' by zilian: https://www.shadertoy.com/view/DlVcRW /// +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + + +const TEMPERATURE: f32 = 5.; +const NOISESCALE: f32 = 0.2; +const EFFECTWIDTH: f32 = 1.; +const LINETHICKNESS: f32 = 0.008; +const SPEED: f32 = 0.2; + + +/// This is a port of 'Perlin Waves' by zilian: https://www.shadertoy.com/view/DlVcRW /// +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let resolution = view.viewport.zw; + var uv = in.uv; + uv.y *= 0.5; // Bumping the Y down a bit. + + let y_inverted_location = vec2(i32(uv.x), i32(resolution.y) - i32(uv.y)); + let location = vec2(i32(uv.x), i32(uv.y)); + + var fragColor: vec4; + var fragCoord = vec2(f32(location.x), f32(location.y) ); + + var sampleY: f32 = 0.; + sampleY = sampleY + (globals.time * SPEED); + var finalColor: vec3 = vec3(0.); + let deltaY: f32 = 0.003; + + for (var i: f32 = -10.; i <= 10.; i = i + (1.)) { + let p: vec2 = uv + vec2(0.06 * i, 0.05 * i); + sampleY = sampleY + (i * deltaY); + if (p.x < -EFFECTWIDTH || p.x > EFFECTWIDTH) { + continue; + } + let line: f32 = perline(p, sampleY, LINETHICKNESS, NOISESCALE); + let opacity: f32 = exp(-abs(i * 0.2)); + let col: vec3 = palette(i * 0.04 + 0.3) * 2. * line * opacity; + finalColor = max(finalColor, col); + } + + return vec4f(finalColor, 1.0); +} + + +fn fade(t: vec2) -> vec2 { + return t * t * t * (t * (t * 6. - 15.) + 10.); +} + +fn permute(x: vec4) -> vec4 { + return (((x * 34. + 1.) * x) % (289.)); +} + +fn cnoise(P: vec2) -> f32 { + var Pi: vec4 = floor(P.xyxy) + vec4(0., 0., 1., 1.); + let Pf: vec4 = fract(P.xyxy) - vec4(0., 0., 1., 1.); + Pi = ((Pi) % (289.)); + let ix: vec4 = Pi.xzxz; + let iy: vec4 = Pi.yyww; + let fx: vec4 = Pf.xzxz; + let fy: vec4 = Pf.yyww; + var i: vec4 = permute(permute(ix) + iy); + var gx: vec4 = 2. * fract(i * 0.024390243) - 1.; + let gy: vec4 = abs(gx) - 0.5; + let tx: vec4 = floor(gx + 0.5); + gx = gx - tx; + var g00: vec2 = vec2(gx.x, gy.x); + var g10: vec2 = vec2(gx.y, gy.y); + var g01: vec2 = vec2(gx.z, gy.z); + var g11: vec2 = vec2(gx.w, gy.w); + let norm: vec4 = 1.7928429 - 0.85373473 * vec4(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11)); + g00 = g00 * (norm.x); + g01 = g01 * (norm.y); + g10 = g10 * (norm.z); + g11 = g11 * (norm.w); + let n00: f32 = dot(g00, vec2(fx.x, fy.x)); + let n10: f32 = dot(g10, vec2(fx.y, fy.y)); + let n01: f32 = dot(g01, vec2(fx.z, fy.z)); + let n11: f32 = dot(g11, vec2(fx.w, fy.w)); + let fade_xy: vec2 = fade(Pf.xy); + let n_x: vec2 = mix(vec2(n00, n01), vec2(n10, n11), fade_xy.x); + let n_xy: f32 = mix(n_x.x, n_x.y, fade_xy.y); + return 2.3 * n_xy; +} + +fn perline(p: vec2, noiseY: f32, lineThickness: f32, noiseScale: f32) -> f32 { + let x: f32 = p.x / 2.; + let s: f32 = cnoise(vec2(x, noiseY) * TEMPERATURE) * noiseScale; + let distanceToLine: f32 = abs(p.y - s); + return 0.009 / distanceToLine; +} + +/// Regular shadplayers will recognise this one... 
+fn palette(t: f32) -> vec3 { + let a: vec3 = vec3(0.5, 0.5, 0.5); + let b: vec3 = vec3(0.5, 0.5, 0.5); + let c: vec3 = vec3(1., 1., 1.); + let d: vec3 = vec3(0.263, 0.416, 0.557); + return a + b * cos(6.28318 * (c * t + d)); +} + + +``` + +### assets/Gallery/lines/dotted_line + +```rust +#import bevy_pbr::mesh_view_bindings globals +#import bevy_pbr::forward_io::VertexOutput + +// If you're passing this in from bevy declare them over there. +struct DottedLineShader { + tint: vec4, + line_width: f32, + segments: f32, + phase: f32, + line_spacing: f32, +}; + +@group(2) @binding(0) +var material: DottedLineShader; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv: vec2 = (in.uv * 2.0) - 1.0; // normalize uvs to [-1..1] + let t = globals.time; //TODO: animate. + var col: vec4 = vec4(0.0); // Initialize to transparent + + // draw x line + if abs(uv.x) < 0.025{ + uv += t * 0.5; + // segment the line, only tint the areas we want + var uv_segmented: vec2 = fract(uv * 3.0) * 0.05; + let step_y: f32 = step(0.025, abs(uv_segmented.y)); + col += vec4f(0.23, 0.88, 0.238, 1.0)* step_y; + } + + return col; +} + +``` + +### assets/Gallery/fbmCloud/fmb_cloud + +```rust +/// ***************************** /// +/// This is a port fo the FBM quick example in the little book of shaders: Author @patriciogv - 2015 http://patriciogonzalezvivo.com +/// Ours looks a lot like theirs at sufficently small resolutions, but to dream a little larger there's a custom gussianBlur added. +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; +const NUM_OCTAVES: i32 = 8; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var uv = in.uv; + let resolution = view.viewport.zw; + let time = globals.time * SPEED; + uv *= rotate2D(NEG_HALF_PI); + + + // Slapping in a gaussian blur: + // let blurRadius: f32 = 1.0; // Adjust the radius to control the blur amount, maybe don't go too HIGH! + // var blurredColor: vec4 = vec4(0.0, 0.0, 0.0, 0.0); + // var totalWeight: f32 = 0.0; + // let intRadius: i32 = i32(blurRadius); + + // for (var x: i32 = -intRadius; x <= intRadius; x++) { + // for (var y: i32 = -intRadius; y <= intRadius; y++) { + // var sampleUv: vec2 = uv + vec2(f32(x), f32(y)) / resolution; + // var sampleColor: vec4 = fmb_cloud(sampleUv, time, resolution); + // var weight: f32 = exp(-f32(x * x + y * y) / (2.0 * blurRadius * blurRadius)); + // blurredColor += sampleColor * weight; + // totalWeight += weight; + // } + // } + + // blurredColor /= totalWeight; + // return blurredColor; + + // or, the vanilla port: + return fmb_cloud(uv, time, resolution); +} + + +fn fmb_cloud(uv: vec2f, time: f32, resolution: vec2f)->vec4f{ + var color: vec3 = vec3(0.); + + var q: vec2 = vec2(0.); + q.x = fbm(uv + 0. * time); + q.y = fbm(uv + vec2(1.)); + + var r: vec2 = vec2(0.); + r.x = fbm(uv + 1. * q + vec2(1.7, 9.2) + 0.15 * time); + r.y = fbm(uv + 1. 
* q + vec2(8.3, 2.8) + 0.126 * time); + + let f: f32 = fbm(uv + r); + + color = mix(vec3(0.101961, 0.619608, 0.666667), vec3(0.666667, 0.666667, 0.498039), clamp(f * f * 4., 0., 1.)); + color = mix(color, vec3(0., 0., 0.164706), clamp(length(q), 0., 1.)); + color = mix(color, vec3(0.666667, 1., 1.), clamp(length(r.x), 0., 1.)); + + return vec4((f * f * f + 0.6 * f * f + 0.5 * f) * color, 1.0); +} + +fn random(uv: vec2) -> f32 { + return fract(sin(dot(uv.xy, vec2(12.9898, 78.233))) * 43758.547); +} + +fn noise(uv: vec2) -> f32 { + var i: vec2 = floor(uv); + var f: vec2 = fract(uv); + var a: f32 = random(i); + let b: f32 = random(i + vec2(1., 0.)); + let c: f32 = random(i + vec2(0., 1.)); + let d: f32 = random(i + vec2(1., 1.)); + let u: vec2 = f * f * (3. - 2. * f); + return mix(a, b, u.x) + (c - a) * u.y * (1. - u.x) + (d - b) * u.x * u.y; +} + +fn fbm(_uv: vec2) -> f32 { + var uv = _uv; + var v: f32 = 0.; + var a: f32 = 0.5; + let shift: vec2 = vec2(100.); + let rot: mat2x2 = mat2x2(cos(0.5), sin(0.5), -sin(0.5), cos(0.5)); + + for (var i: i32 = 0; i < NUM_OCTAVES; i++) { + v = v + (a * noise(uv)); + uv = rot * uv * 2. + shift; + a = a * (0.5); + } + + return v; +} + + +``` + +### bevy/assets/shaders/custom_ui_material + +```rust +// This shader draws a circle with a given input color +#import bevy_ui::ui_vertex_output::UiVertexOutput + +@group(1) @binding(0) var color: vec4; +@group(1) @binding(1) var slider: f32; +@group(1) @binding(2) var material_color_texture: texture_2d; +@group(1) @binding(3) var material_color_sampler: sampler; + + +@fragment +fn fragment(in: UiVertexOutput) -> @location(0) vec4 { + if in.uv.x < slider { + let output_color = textureSample(material_color_texture, material_color_sampler, in.uv) * color; + return output_color; + } else { + return vec4(0.0); + } +} + +``` + +### bevy/assets/shaders/array_texture + +```rust +#import bevy_pbr::{ + forward_io::VertexOutput, + mesh_view_bindings::view, + pbr_types::{STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new}, + pbr_functions as fns, + pbr_bindings, +} +#import bevy_core_pipeline::tonemapping::tone_mapping + +@group(2) @binding(0) var my_array_texture: texture_2d_array; +@group(2) @binding(1) var my_array_texture_sampler: sampler; + +@fragment +fn fragment( + @builtin(front_facing) is_front: bool, + mesh: VertexOutput, +) -> @location(0) vec4 { + let layer = i32(mesh.world_position.x) & 0x3; + + // Prepare a 'processed' StandardMaterial by sampling all textures to resolve + // the material members + var pbr_input: PbrInput = pbr_input_new(); + + pbr_input.material.base_color = textureSample(my_array_texture, my_array_texture_sampler, mesh.uv, layer); +#ifdef VERTEX_COLORS + pbr_input.material.base_color = pbr_input.material.base_color * mesh.color; +#endif + + let double_sided = (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; + + pbr_input.frag_coord = mesh.position; + pbr_input.world_position = mesh.world_position; + pbr_input.world_normal = fns::prepare_world_normal( + mesh.world_normal, + double_sided, + is_front, + ); + + pbr_input.is_orthographic = view.clip_from_view[3].w == 1.0; + + pbr_input.N = normalize(pbr_input.world_normal); + +#ifdef VERTEX_TANGENTS + let Nt = textureSampleBias(pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, mesh.uv, view.mip_bias).rgb; + let TBN = fns::calculate_tbn_mikktspace(mesh.world_normal, mesh.world_tangent); + pbr_input.N = fns::apply_normal_mapping( + pbr_input.material.flags, + TBN, + double_sided, + is_front, 
+ Nt, + ); +#endif + + pbr_input.V = fns::calculate_view(mesh.world_position, pbr_input.is_orthographic); + + return tone_mapping(fns::apply_pbr_lighting(pbr_input), view.color_grading); +} + +``` + +### bevy/assets/shaders/texture_binding_array + +```rust +#import bevy_pbr::forward_io::VertexOutput + +@group(2) @binding(0) var textures: binding_array>; +@group(2) @binding(1) var nearest_sampler: sampler; +// We can also have array of samplers +// var samplers: binding_array; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + // Select the texture to sample from using non-uniform uv coordinates + let coords = clamp(vec2(mesh.uv * 4.0), vec2(0u), vec2(3u)); + let index = coords.y * 4u + coords.x; + let inner_uv = fract(mesh.uv * 4.0); + return textureSample(textures[index], nearest_sampler, inner_uv); +} + +``` + +### bevy/assets/shaders/cubemap_unlit + +```rust +#import bevy_pbr::forward_io::VertexOutput + +#ifdef CUBEMAP_ARRAY +@group(2) @binding(0) var base_color_texture: texture_cube_array; +#else +@group(2) @binding(0) var base_color_texture: texture_cube; +#endif + +@group(2) @binding(1) var base_color_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + let fragment_position_view_lh = mesh.world_position.xyz * vec3(1.0, 1.0, -1.0); + return textureSample( + base_color_texture, + base_color_sampler, + fragment_position_view_lh + ); +} + +``` + +### bevy/assets/shaders/custom_material + +```rust +#import bevy_pbr::forward_io::VertexOutput +// we can import items from shader modules in the assets folder with a quoted path +#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER + +@group(2) @binding(0) var material_color: vec4; +@group(2) @binding(1) var material_color_texture: texture_2d; +@group(2) @binding(2) var material_color_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return material_color * textureSample(material_color_texture, material_color_sampler, mesh.uv) * COLOR_MULTIPLIER; +} + +``` + +### bevy/assets/shaders/post_processing + +```rust +// This shader computes the chromatic aberration effect + +// Since post processing is a fullscreen effect, we use the fullscreen vertex shader provided by bevy. +// This will import a vertex shader that renders a single fullscreen triangle. +// +// A fullscreen triangle is a single triangle that covers the entire screen. +// The box in the top left in that diagram is the screen. The 4 x are the corner of the screen +// +// Y axis +// 1 | x-----x...... +// 0 | | s | . ´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | :´ +// +--------------- X axis +// -1 0 1 2 3 +// +// As you can see, the triangle ends up bigger than the screen. +// +// You don't need to worry about this too much since bevy will compute the correct UVs for you. +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var texture_sampler: sampler; +struct PostProcessSettings { + intensity: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. 
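+    // A single f32 only occupies 4 bytes, so this vec3 of padding rounds the uniform
+    // struct up to the 16 bytes WebGL2 requires.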
+ _webgl2_padding: vec3 +#endif +} +@group(0) @binding(2) var settings: PostProcessSettings; + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Chromatic aberration strength + let offset_strength = settings.intensity; + + // Sample each color channel with an arbitrary shift + return vec4( + textureSample(screen_texture, texture_sampler, in.uv + vec2(offset_strength, -offset_strength)).r, + textureSample(screen_texture, texture_sampler, in.uv + vec2(-offset_strength, 0.0)).g, + textureSample(screen_texture, texture_sampler, in.uv + vec2(0.0, offset_strength)).b, + 1.0 + ); +} + + +``` + +### bevy/assets/shaders/gpu_readback + +```rust +// This shader is used for the gpu_readback example +// The actual work it does is not important for the example + +// This is the data that lives in the gpu only buffer +@group(0) @binding(0) var data: array; + +@compute @workgroup_size(1) +fn main(@builtin(global_invocation_id) global_id: vec3) { + // We use the global_id to index the array to make sure we don't + // access data used in another workgroup + data[global_id.x] += 1u; +} + +``` + +### bevy/assets/shaders/custom_material_2d + +```rust +#import bevy_sprite::mesh2d_vertex_output::VertexOutput +// we can import items from shader modules in the assets folder with a quoted path +#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER + +@group(2) @binding(0) var material_color: vec4; +@group(2) @binding(1) var base_color_texture: texture_2d; +@group(2) @binding(2) var base_color_sampler: sampler; + +@fragment +fn fragment(mesh: VertexOutput) -> @location(0) vec4 { + return material_color * textureSample(base_color_texture, base_color_sampler, mesh.uv) * COLOR_MULTIPLIER; +} + +``` + +### bevy/assets/shaders/shader_defs + +```rust +#import bevy_pbr::forward_io::VertexOutput + +struct CustomMaterial { + color: vec4, +}; + +@group(2) @binding(0) var material: CustomMaterial; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifdef IS_RED + return vec4(1.0, 0.0, 0.0, 1.0); +#else + return material.color; +#endif +} + +``` + +### bevy/assets/shaders/line_material + +```rust +#import bevy_pbr::forward_io::VertexOutput + +struct LineMaterial { + color: vec4, +}; + +@group(2) @binding(0) var material: LineMaterial; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return material.color; +} + +``` + +### bevy/assets/shaders/animate_shader + +```rust +// The time since startup data is in the globals binding which is part of the mesh_view_bindings import +#import bevy_pbr::{ + mesh_view_bindings::globals, + forward_io::VertexOutput, +} + +fn oklab_to_linear_srgb(c: vec3) -> vec3 { + let L = c.x; + let a = c.y; + let b = c.z; + + let l_ = L + 0.3963377774 * a + 0.2158037573 * b; + let m_ = L - 0.1055613458 * a - 0.0638541728 * b; + let s_ = L - 0.0894841775 * a - 1.2914855480 * b; + + let l = l_ * l_ * l_; + let m = m_ * m_ * m_; + let s = s_ * s_ * s_; + + return vec3( + 4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, + -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, + -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s, + ); +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let speed = 2.0; + // The globals binding contains various global values like time + // which is the time since startup in seconds + let t_1 = sin(globals.time * speed) * 0.5 + 0.5; + let t_2 = cos(globals.time * speed); + + let distance_to_center = distance(in.uv, vec2(0.5)) * 1.4; + + // blending is done 
in a perceptual color space: https://bottosson.github.io/posts/oklab/ + let red = vec3(0.627955, 0.224863, 0.125846); + let green = vec3(0.86644, -0.233887, 0.179498); + let blue = vec3(0.701674, 0.274566, -0.169156); + let white = vec3(1.0, 0.0, 0.0); + let mixed = mix(mix(red, blue, t_1), mix(green, white, t_2), distance_to_center); + + return vec4(oklab_to_linear_srgb(mixed), 1.0); +} + +``` + +### bevy/assets/shaders/extended_material + +```rust +#import bevy_pbr::{ + pbr_fragment::pbr_input_from_standard_material, + pbr_functions::alpha_discard, +} + +#ifdef PREPASS_PIPELINE +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, +} +#else +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, +} +#endif + +struct MyExtendedMaterial { + quantize_steps: u32, +} + +@group(2) @binding(100) +var my_extended_material: MyExtendedMaterial; + +@fragment +fn fragment( + in: VertexOutput, + @builtin(front_facing) is_front: bool, +) -> FragmentOutput { + // generate a PbrInput struct from the StandardMaterial bindings + var pbr_input = pbr_input_from_standard_material(in, is_front); + + // we can optionally modify the input before lighting and alpha_discard is applied + pbr_input.material.base_color.b = pbr_input.material.base_color.r; + + // alpha discard + pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); + +#ifdef PREPASS_PIPELINE + // in deferred mode we can't modify anything after that, as lighting is run in a separate fullscreen shader. + let out = deferred_output(in, pbr_input); +#else + var out: FragmentOutput; + // apply lighting + out.color = apply_pbr_lighting(pbr_input); + + // we can optionally modify the lit color before post-processing is applied + out.color = vec4(vec4(out.color * f32(my_extended_material.quantize_steps))) / f32(my_extended_material.quantize_steps); + + // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) + // note this does not include fullscreen postprocessing effects like bloom. 
+ out.color = main_pass_post_lighting_processing(pbr_input, out.color); + + // we can optionally modify the final result here + out.color = out.color * 2.0; +#endif + + return out; +} + +``` + +### bevy/assets/shaders/custom_vertex_attribute + +```rust +#import bevy_pbr::mesh_functions::{get_world_from_local, mesh_position_local_to_clip} + +struct CustomMaterial { + color: vec4, +}; +@group(2) @binding(0) var material: CustomMaterial; + +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + @location(1) blend_color: vec4, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) blend_color: vec4, +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + out.clip_position = mesh_position_local_to_clip( + get_world_from_local(vertex.instance_index), + vec4(vertex.position, 1.0), + ); + out.blend_color = vertex.blend_color; + return out; +} + +struct FragmentInput { + @location(0) blend_color: vec4, +}; + +@fragment +fn fragment(input: FragmentInput) -> @location(0) vec4 { + return material.color * input.blend_color; +} + +``` + +### bevy/assets/shaders/show_prepass + +```rust +#import bevy_pbr::{ + mesh_view_bindings::globals, + prepass_utils, + forward_io::VertexOutput, +} + +struct ShowPrepassSettings { + show_depth: u32, + show_normals: u32, + show_motion_vectors: u32, + padding_1: u32, + padding_2: u32, +} +@group(2) @binding(0) var settings: ShowPrepassSettings; + +@fragment +fn fragment( +#ifdef MULTISAMPLED + @builtin(sample_index) sample_index: u32, +#endif + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifndef MULTISAMPLED + let sample_index = 0u; +#endif + if settings.show_depth == 1u { + let depth = bevy_pbr::prepass_utils::prepass_depth(mesh.position, sample_index); + return vec4(depth, depth, depth, 1.0); + } else if settings.show_normals == 1u { + let normal = bevy_pbr::prepass_utils::prepass_normal(mesh.position, sample_index); + return vec4(normal, 1.0); + } else if settings.show_motion_vectors == 1u { + let motion_vector = bevy_pbr::prepass_utils::prepass_motion_vector(mesh.position, sample_index); + return vec4(motion_vector / globals.delta_time, 0.0, 1.0); + } + + return vec4(0.0); +} + +``` + +### bevy/assets/shaders/water_material + +```rust +// A shader that creates water ripples by overlaying 4 normal maps on top of one +// another. +// +// This is used in the `ssr` example. It only supports deferred rendering. + +#import bevy_pbr::{ + pbr_deferred_functions::deferred_output, + pbr_fragment::pbr_input_from_standard_material, + prepass_io::{VertexOutput, FragmentOutput}, +} +#import bevy_render::globals::Globals + +// Parameters to the water shader. +struct WaterSettings { + // How much to displace each octave each frame, in the u and v directions. + // Two octaves are packed into each `vec4`. + octave_vectors: array, 2>, + // How wide the waves are in each octave. + octave_scales: vec4, + // How high the waves are in each octave. + octave_strengths: vec4, +} + +@group(0) @binding(1) var globals: Globals; + +@group(2) @binding(100) var water_normals_texture: texture_2d; +@group(2) @binding(101) var water_normals_sampler: sampler; +@group(2) @binding(102) var water_settings: WaterSettings; + +// Samples a single octave of noise and returns the resulting normal. +fn sample_noise_octave(uv: vec2, strength: f32) -> vec3 { + let N = textureSample(water_normals_texture, water_normals_sampler, uv).rbg * 2.0 - 1.0; + // This isn't slerp, but it's good enough. 
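+    // (A true slerp would rotate from the up vector toward N along the unit
+    // sphere; normalizing a plain linear mix is cheaper and nearly identical
+    // for small tilt angles.)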
+ return normalize(mix(vec3(0.0, 1.0, 0.0), N, strength)); +} + +// Samples all four octaves of noise and returns the resulting normal. +fn sample_noise(uv: vec2, time: f32) -> vec3 { + let uv0 = uv * water_settings.octave_scales[0] + water_settings.octave_vectors[0].xy * time; + let uv1 = uv * water_settings.octave_scales[1] + water_settings.octave_vectors[0].zw * time; + let uv2 = uv * water_settings.octave_scales[2] + water_settings.octave_vectors[1].xy * time; + let uv3 = uv * water_settings.octave_scales[3] + water_settings.octave_vectors[1].zw * time; + return normalize( + sample_noise_octave(uv0, water_settings.octave_strengths[0]) + + sample_noise_octave(uv1, water_settings.octave_strengths[1]) + + sample_noise_octave(uv2, water_settings.octave_strengths[2]) + + sample_noise_octave(uv3, water_settings.octave_strengths[3]) + ); +} + +@fragment +fn fragment(in: VertexOutput, @builtin(front_facing) is_front: bool) -> FragmentOutput { + // Create the PBR input. + var pbr_input = pbr_input_from_standard_material(in, is_front); + // Bump the normal. + pbr_input.N = sample_noise(in.uv, globals.time); + // Send the rest to the deferred shader. + return deferred_output(in, pbr_input); +} + +``` + +### bevy/assets/shaders/custom_material_import + +```rust +// this is made available to the importing module +const COLOR_MULTIPLIER: vec4 = vec4(1.0, 1.0, 1.0, 0.5); + +``` + +### bevy/assets/shaders/game_of_life + +```rust +// The shader reads the previous frame's state from the `input` texture, and writes the new state of +// each pixel to the `output` texture. The textures are flipped each step to progress the +// simulation. +// Two textures are needed for the game of life as each pixel of step N depends on the state of its +// neighbors at step N-1. + +@group(0) @binding(0) var input: texture_storage_2d; + +@group(0) @binding(1) var output: texture_storage_2d; + +fn hash(value: u32) -> u32 { + var state = value; + state = state ^ 2747636419u; + state = state * 2654435769u; + state = state ^ state >> 16u; + state = state * 2654435769u; + state = state ^ state >> 16u; + state = state * 2654435769u; + return state; +} + +fn randomFloat(value: u32) -> f32 { + return f32(hash(value)) / 4294967295.0; +} + +@compute @workgroup_size(8, 8, 1) +fn init(@builtin(global_invocation_id) invocation_id: vec3, @builtin(num_workgroups) num_workgroups: vec3) { + let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); + + let randomNumber = randomFloat(invocation_id.y << 16u | invocation_id.x); + let alive = randomNumber > 0.9; + let color = vec4(f32(alive)); + + textureStore(output, location, color); +} + +fn is_alive(location: vec2, offset_x: i32, offset_y: i32) -> i32 { + let value: vec4 = textureLoad(input, location + vec2(offset_x, offset_y)); + return i32(value.x); +} + +fn count_alive(location: vec2) -> i32 { + return is_alive(location, -1, -1) + + is_alive(location, -1, 0) + + is_alive(location, -1, 1) + + is_alive(location, 0, -1) + + is_alive(location, 0, 1) + + is_alive(location, 1, -1) + + is_alive(location, 1, 0) + + is_alive(location, 1, 1); +} + +@compute @workgroup_size(8, 8, 1) +fn update(@builtin(global_invocation_id) invocation_id: vec3) { + let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); + + let n_alive = count_alive(location); + + var alive: bool; + if (n_alive == 3) { + alive = true; + } else if (n_alive == 2) { + let currently_alive = is_alive(location, 0, 0); + alive = bool(currently_alive); + } else { + alive = false; + } + let color = vec4(f32(alive)); + + 
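+    // The branch above encodes Conway's B3/S23 rules: exactly three live
+    // neighbors spawns or keeps a live cell, two neighbors preserves the
+    // current state, and anything else dies. The result is written to `output`,
+    // which becomes the next step's `input` when the textures are flipped.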
textureStore(output, location, color); +} + +``` + +### bevy/assets/shaders/fallback_image_test + +```rust +#import bevy_pbr::forward_io::VertexOutput + +@group(2) @binding(0) var test_texture_1d: texture_1d; +@group(2) @binding(1) var test_texture_1d_sampler: sampler; + +@group(2) @binding(2) var test_texture_2d: texture_2d; +@group(2) @binding(3) var test_texture_2d_sampler: sampler; + +@group(2) @binding(4) var test_texture_2d_array: texture_2d_array; +@group(2) @binding(5) var test_texture_2d_array_sampler: sampler; + +@group(2) @binding(6) var test_texture_cube: texture_cube; +@group(2) @binding(7) var test_texture_cube_sampler: sampler; + +@group(2) @binding(8) var test_texture_cube_array: texture_cube_array; +@group(2) @binding(9) var test_texture_cube_array_sampler: sampler; + +@group(2) @binding(10) var test_texture_3d: texture_3d; +@group(2) @binding(11) var test_texture_3d_sampler: sampler; + +@fragment +fn fragment(in: VertexOutput) {} + +``` + +### bevy/assets/shaders/irradiance_volume_voxel_visualization + +```rust +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::irradiance_volume +#import bevy_pbr::mesh_view_bindings + +struct VoxelVisualizationIrradianceVolumeInfo { + world_from_voxel: mat4x4, + voxel_from_world: mat4x4, + resolution: vec3, + // A scale factor that's applied to the diffuse and specular light from the + // light probe. This is in units of cd/m² (candela per square meter). + intensity: f32, +} + +@group(2) @binding(100) +var irradiance_volume_info: VoxelVisualizationIrradianceVolumeInfo; + +@fragment +fn fragment(mesh: VertexOutput) -> @location(0) vec4 { + // Snap the world position we provide to `irradiance_volume_light()` to the + // middle of the nearest texel. + var unit_pos = (irradiance_volume_info.voxel_from_world * + vec4(mesh.world_position.xyz, 1.0f)).xyz; + let resolution = vec3(irradiance_volume_info.resolution); + let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f)); + let stp_rounded = round(stp - 0.5f) + 0.5f; + let rounded_world_pos = (irradiance_volume_info.world_from_voxel * vec4(stp_rounded, 1.0f)).xyz; + + // `irradiance_volume_light()` multiplies by intensity, so cancel it out. + // If we take intensity into account, the cubes will be way too bright. + let rgb = irradiance_volume::irradiance_volume_light( + mesh.world_position.xyz, + mesh.world_normal) / irradiance_volume_info.intensity; + + return vec4(rgb, 1.0f); +} + +``` + +### bevy/assets/shaders/custom_phase_item + +```rust +// `custom_phase_item.wgsl` +// +// This shader goes with the `custom_phase_item` example. It demonstrates how to +// enqueue custom rendering logic in a `RenderPhase`. + +// The GPU-side vertex structure. +struct Vertex { + // The world-space position of the vertex. + @location(0) position: vec3, + // The color of the vertex. + @location(1) color: vec3, +}; + +// Information passed from the vertex shader to the fragment shader. +struct VertexOutput { + // The clip-space position of the vertex. + @builtin(position) clip_position: vec4, + // The color of the vertex. + @location(0) color: vec3, +}; + +// The vertex shader entry point. +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + // Use an orthographic projection. + var vertex_output: VertexOutput; + vertex_output.clip_position = vec4(vertex.position.xyz, 1.0); + vertex_output.color = vertex.color; + return vertex_output; +} + +// The fragment shader entry point. 
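+// (It simply forwards the interpolated vertex color with an alpha of 1.0.)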
+@fragment +fn fragment(vertex_output: VertexOutput) -> @location(0) vec4 { + return vec4(vertex_output.color, 1.0); +} + +``` + +### bevy/assets/shaders/instancing + +```rust +#import bevy_pbr::mesh_functions::{get_world_from_local, mesh_position_local_to_clip} + +struct Vertex { + @location(0) position: vec3, + @location(1) normal: vec3, + @location(2) uv: vec2, + + @location(3) i_pos_scale: vec4, + @location(4) i_color: vec4, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + let position = vertex.position * vertex.i_pos_scale.w + vertex.i_pos_scale.xyz; + var out: VertexOutput; + // NOTE: Passing 0 as the instance_index to get_world_from_local() is a hack + // for this example as the instance_index builtin would map to the wrong + // index in the Mesh array. This index could be passed in via another + // uniform instead but it's unnecessary for the example. + out.clip_position = mesh_position_local_to_clip( + get_world_from_local(0u), + vec4(position, 1.0) + ); + out.color = vertex.i_color; + return out; +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return in.color; +} + +``` + +### bevy/assets/shaders/tonemapping_test_patterns + +```rust +#import bevy_pbr::{ + mesh_view_bindings, + forward_io::VertexOutput, +} + +#import bevy_render::maths::PI + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping::tone_mapping +#endif + +// Sweep across hues on y axis with value from 0.0 to +15EV across x axis +// quantized into 24 steps for both axis. +fn color_sweep(uv_input: vec2) -> vec3 { + var uv = uv_input; + let steps = 24.0; + uv.y = uv.y * (1.0 + 1.0 / steps); + let ratio = 2.0; + + let h = PI * 2.0 * floor(1.0 + steps * uv.y) / steps; + let L = floor(uv.x * steps * ratio) / (steps * ratio) - 0.5; + + var color = vec3(0.0); + if uv.y < 1.0 { + color = cos(h + vec3(0.0, 1.0, 2.0) * PI * 2.0 / 3.0); + let maxRGB = max(color.r, max(color.g, color.b)); + let minRGB = min(color.r, min(color.g, color.b)); + color = exp(15.0 * L) * (color - minRGB) / (maxRGB - minRGB); + } else { + color = vec3(exp(15.0 * L)); + } + return color; +} + +fn hsv_to_srgb(c: vec3) -> vec3 { + let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + let p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} + +// Generates a continuous sRGB sweep. 
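+// (Hue follows uv.x while brightness ramps exponentially with uv.y, covering
+// roughly nine stops above black.)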
+fn continuous_hue(uv: vec2) -> vec3 { + return hsv_to_srgb(vec3(uv.x, 1.0, 1.0)) * max(0.0, exp2(uv.y * 9.0) - 1.0); +} + +@fragment +fn fragment( + in: VertexOutput, +) -> @location(0) vec4 { + var uv = in.uv; + var out = vec3(0.0); + if uv.y > 0.5 { + uv.y = 1.0 - uv.y; + out = color_sweep(vec2(uv.x, uv.y * 2.0)); + } else { + out = continuous_hue(vec2(uv.y * 2.0, uv.x)); + } + var color = vec4(out, 1.0); +#ifdef TONEMAP_IN_SHADER + color = tone_mapping(color, mesh_view_bindings::view.color_grading); +#endif + return color; +} + +``` + +### bevy/assets/shaders/custom_gltf_2d + +```rust +#import bevy_sprite::{ + mesh2d_view_bindings::globals, + mesh2d_functions::{get_world_from_local, mesh2d_position_local_to_clip}, +} + +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + @location(1) color: vec4, + @location(2) barycentric: vec3, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, + @location(1) barycentric: vec3, +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + let world_from_local = get_world_from_local(vertex.instance_index); + out.clip_position = mesh2d_position_local_to_clip(world_from_local, vec4(vertex.position, 1.0)); + out.color = vertex.color; + out.barycentric = vertex.barycentric; + return out; +} + +struct FragmentInput { + @location(0) color: vec4, + @location(1) barycentric: vec3, +}; + +@fragment +fn fragment(input: FragmentInput) -> @location(0) vec4 { + let d = min(input.barycentric.x, min(input.barycentric.y, input.barycentric.z)); + let t = 0.05 * (0.85 + sin(5.0 * globals.time)); + return mix(vec4(1.0,1.0,1.0,1.0), input.color, smoothstep(t, t+0.01, d)); +} + +``` + +### bevy/assets/shaders/custom_material_screenspace_texture + +```rust +#import bevy_pbr::{ + mesh_view_bindings::view, + forward_io::VertexOutput, + utils::coords_to_viewport_uv, +} + +@group(2) @binding(0) var texture: texture_2d; +@group(2) @binding(1) var texture_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + let viewport_uv = coords_to_viewport_uv(mesh.position.xy, view.viewport); + let color = textureSample(texture, texture_sampler, viewport_uv); + return color; +} + +``` + +### bevy/crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id + +```rust +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) +var material_id_texture: texture_2d; + +struct FragmentOutput { + @builtin(frag_depth) frag_depth: f32, + +} + +@fragment +fn fragment(in: FullscreenVertexOutput) -> FragmentOutput { + var out: FragmentOutput; + // Depth is stored as unorm, so we are dividing the u8 by 255.0 here. 
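+    // (The ID is an 8-bit value, so dividing by 255.0 maps it onto the
+    // [0.0, 1.0] range expected by the unorm depth attachment.)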
+ out.frag_depth = f32(textureLoad(material_id_texture, vec2(in.position.xy), 0).x) / 255.0; + return out; +} + + +``` + +### bevy/crates/bevy_core_pipeline/src/motion_blur/motion_blur + +```rust +#import bevy_pbr::prepass_utils +#import bevy_pbr::utils +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_render::globals::Globals + +#ifdef MULTISAMPLED +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var motion_vectors: texture_multisampled_2d; +@group(0) @binding(2) var depth: texture_depth_multisampled_2d; +#else +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var motion_vectors: texture_2d; +@group(0) @binding(2) var depth: texture_depth_2d; +#endif +@group(0) @binding(3) var texture_sampler: sampler; +struct MotionBlur { + shutter_angle: f32, + samples: u32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: vec2 +#endif +} +@group(0) @binding(4) var settings: MotionBlur; +@group(0) @binding(5) var globals: Globals; + +@fragment +fn fragment( + #ifdef MULTISAMPLED + @builtin(sample_index) sample_index: u32, + #endif + in: FullscreenVertexOutput +) -> @location(0) vec4 { + let texture_size = vec2(textureDimensions(screen_texture)); + let frag_coords = vec2(in.uv * texture_size); + +#ifdef MULTISAMPLED + let base_color = textureLoad(screen_texture, frag_coords, i32(sample_index)); +#else + let base_color = textureSample(screen_texture, texture_sampler, in.uv); +#endif + + let shutter_angle = settings.shutter_angle; + +#ifdef MULTISAMPLED + let this_motion_vector = textureLoad(motion_vectors, frag_coords, i32(sample_index)).rg; +#else + let this_motion_vector = textureSample(motion_vectors, texture_sampler, in.uv).rg; +#endif + +#ifdef NO_DEPTH_TEXTURE_SUPPORT + let this_depth = 0.0; + let depth_supported = false; +#else + let depth_supported = true; +#ifdef MULTISAMPLED + let this_depth = textureLoad(depth, frag_coords, i32(sample_index)); +#else + let this_depth = textureSample(depth, texture_sampler, in.uv); +#endif +#endif + + // The exposure vector is the distance that this fragment moved while the camera shutter was + // open. This is the motion vector (total distance traveled) multiplied by the shutter angle (a + // fraction). In film, the shutter angle is commonly 0.5 or "180 degrees" (out of 360 total). + // This means that for a frame time of 20ms, the shutter is only open for 10ms. + // + // Using a shutter angle larger than 1.0 is non-physical, objects would need to move further + // than they physically travelled during a frame, which is not possible. Note: we allow values + // larger than 1.0 because it may be desired for artistic reasons. + let exposure_vector = shutter_angle * this_motion_vector; + + var accumulator: vec4; + var weight_total = 0.0; + let n_samples = i32(settings.samples); + let noise = utils::interleaved_gradient_noise(vec2(frag_coords), globals.frame_count); // 0 to 1 + + for (var i = -n_samples; i < n_samples; i++) { + // The current sample step vector, from in.uv + let step_vector = 0.5 * exposure_vector * (f32(i) + noise) / f32(n_samples); + var sample_uv = in.uv + step_vector; + + // If the sample is off screen, skip it. 
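+        // (Skipped samples add neither color nor weight, so they do not darken
+        // the result; the accumulator is normalized by weight_total below.)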
+ if sample_uv.x < 0.0 || sample_uv.x > 1.0 || sample_uv.y < 0.0 || sample_uv.y > 1.0 { + continue; + } + + let sample_coords = vec2(sample_uv * texture_size); + + #ifdef MULTISAMPLED + let sample_color = textureLoad(screen_texture, sample_coords, i32(sample_index)); + #else + let sample_color = textureSample(screen_texture, texture_sampler, sample_uv); + #endif + #ifdef MULTISAMPLED + let sample_motion = textureLoad(motion_vectors, sample_coords, i32(sample_index)).rg; + #else + let sample_motion = textureSample(motion_vectors, texture_sampler, sample_uv).rg; + #endif + #ifdef NO_DEPTH_TEXTURE_SUPPORT + let sample_depth = 0.0; + #else + #ifdef MULTISAMPLED + let sample_depth = textureLoad(depth, sample_coords, i32(sample_index)); + #else + let sample_depth = textureSample(depth, texture_sampler, sample_uv); + #endif + #endif + + var weight = 1.0; + let is_sample_in_fg = !(depth_supported && sample_depth < this_depth && sample_depth > 0.0); + // If the depth is 0.0, this fragment has no depth written to it and we assume it is in the + // background. This ensures that things like skyboxes, which do not write to depth, are + // correctly sampled in motion blur. + if sample_depth != 0.0 && is_sample_in_fg { + // The following weight calculation is used to eliminate ghosting artifacts that are + // common in motion-vector-based motion blur implementations. While some resources + // recommend using depth, I've found that sampling the velocity results in significantly + // better results. Unlike a depth heuristic, this is not scale dependent. + // + // The most distracting artifacts occur when a stationary foreground object is + // incorrectly sampled while blurring a moving background object, causing the stationary + // object to blur when it should be sharp ("background bleeding"). This is most obvious + // when the camera is tracking a fast moving object. The tracked object should be sharp, + // and should not bleed into the motion blurred background. + // + // To attenuate these incorrect samples, we compare the motion of the fragment being + // blurred to the UV being sampled, to answer the question "is it possible that this + // sample was occluding the fragment?" + // + // Note to future maintainers: proceed with caution when making any changes here, and + // ensure you check all occlusion/disocclusion scenarios and fullscreen camera rotation + // blur for regressions. + let frag_speed = length(step_vector); + let sample_speed = length(sample_motion) / 2.0; // Halved because the sample is centered + let cos_angle = dot(step_vector, sample_motion) / (frag_speed * sample_speed * 2.0); + let motion_similarity = clamp(abs(cos_angle), 0.0, 1.0); + if sample_speed * motion_similarity < frag_speed { + // Project the sample's motion onto the frag's motion vector. If the sample did not + // cover enough distance to reach the original frag, there is no way it could have + // influenced this frag at all, and should be discarded. + weight = 0.0; + } + } + weight_total += weight; + accumulator += weight * sample_color; + } + + let has_moved_less_than_a_pixel = + dot(this_motion_vector * texture_size, this_motion_vector * texture_size) < 1.0; + // In case no samples were accepted, fall back to base color. + // We also fall back if motion is small, to not break antialiasing. 
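+    // (has_moved_less_than_a_pixel compares the squared motion, in texels,
+    // against 1.0 using the expression computed just above.)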
+ if weight_total <= 0.0 || has_moved_less_than_a_pixel { + accumulator = base_color; + weight_total = 1.0; + } + return accumulator / weight_total; +} +``` + +### bevy/crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared + +```rust +#define_import_path bevy_core_pipeline::tonemapping + +#import bevy_render::{ + view::ColorGrading, + color_operations::{hsv_to_rgb, rgb_to_hsv}, + maths::{PI_2, powsafe}, +} + +#import bevy_core_pipeline::tonemapping_lut_bindings::{ + dt_lut_texture, + dt_lut_sampler, +} + +// Half the size of the crossfade region between shadows and midtones and +// between midtones and highlights. This value, 0.1, corresponds to 10% of the +// gamut on either side of the cutoff point. +const LEVEL_MARGIN: f32 = 0.1; + +// The inverse reciprocal of twice the above, used when scaling the midtone +// region. +const LEVEL_MARGIN_DIV: f32 = 0.5 / LEVEL_MARGIN; + +fn sample_current_lut(p: vec3) -> vec3 { + // Don't include code that will try to sample from LUTs if tonemap method doesn't require it + // Allows this file to be imported without necessarily needing the lut texture bindings +#ifdef TONEMAP_METHOD_AGX + return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE + return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else ifdef TONEMAP_METHOD_BLENDER_FILMIC + return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else + return vec3(1.0, 0.0, 1.0); + #endif +} + +// -------------------------------------- +// --- SomewhatBoringDisplayTransform --- +// -------------------------------------- +// By Tomasz Stachowiak + +fn rgb_to_ycbcr(col: vec3) -> vec3 { + let m = mat3x3( + 0.2126, 0.7152, 0.0722, + -0.1146, -0.3854, 0.5, + 0.5, -0.4542, -0.0458 + ); + return col * m; +} + +fn ycbcr_to_rgb(col: vec3) -> vec3 { + let m = mat3x3( + 1.0, 0.0, 1.5748, + 1.0, -0.1873, -0.4681, + 1.0, 1.8556, 0.0 + ); + return max(vec3(0.0), col * m); +} + +fn tonemap_curve(v: f32) -> f32 { +#ifdef 0 + // Large linear part in the lows, but compresses highs. 
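+    // (This branch is compiled out; the active curve is the simple
+    // exponential 1 - exp(-v) below.)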
+ float c = v + v * v + 0.5 * v * v * v; + return c / (1.0 + c); +#else + return 1.0 - exp(-v); +#endif +} + +fn tonemap_curve3_(v: vec3) -> vec3 { + return vec3(tonemap_curve(v.r), tonemap_curve(v.g), tonemap_curve(v.b)); +} + +fn somewhat_boring_display_transform(col: vec3) -> vec3 { + var boring_color = col; + let ycbcr = rgb_to_ycbcr(boring_color); + + let bt = tonemap_curve(length(ycbcr.yz) * 2.4); + var desat = max((bt - 0.7) * 0.8, 0.0); + desat *= desat; + + let desat_col = mix(boring_color.rgb, ycbcr.xxx, desat); + + let tm_luma = tonemap_curve(ycbcr.x); + let tm0 = boring_color.rgb * max(0.0, tm_luma / max(1e-5, tonemapping_luminance(boring_color.rgb))); + let final_mult = 0.97; + let tm1 = tonemap_curve3_(desat_col); + + boring_color = mix(tm0, tm1, bt * bt); + + return boring_color * final_mult; +} + +// ------------------------------------------ +// ------------- Tony McMapface ------------- +// ------------------------------------------ +// By Tomasz Stachowiak +// https://github.com/h3r2tic/tony-mc-mapface + +const TONY_MC_MAPFACE_LUT_DIMS: f32 = 48.0; + +fn sample_tony_mc_mapface_lut(stimulus: vec3) -> vec3 { + var uv = (stimulus / (stimulus + 1.0)) * (f32(TONY_MC_MAPFACE_LUT_DIMS - 1.0) / f32(TONY_MC_MAPFACE_LUT_DIMS)) + 0.5 / f32(TONY_MC_MAPFACE_LUT_DIMS); + return sample_current_lut(saturate(uv)).rgb; +} + +// --------------------------------- +// ---------- ACES Fitted ---------- +// --------------------------------- + +// Same base implementation that Godot 4.0 uses for Tonemap ACES. + +// https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl + +// The code in this file was originally written by Stephen Hill (@self_shadow), who deserves all +// credit for coming up with this fit and implementing it. Buy him a beer next time you see him. :) + +fn RRTAndODTFit(v: vec3) -> vec3 { + let a = v * (v + 0.0245786) - 0.000090537; + let b = v * (0.983729 * v + 0.4329510) + 0.238081; + return a / b; +} + +fn ACESFitted(color: vec3) -> vec3 { + var fitted_color = color; + + // sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT + let rgb_to_rrt = mat3x3( + vec3(0.59719, 0.35458, 0.04823), + vec3(0.07600, 0.90834, 0.01566), + vec3(0.02840, 0.13383, 0.83777) + ); + + // ODT_SAT => XYZ => D60_2_D65 => sRGB + let odt_to_rgb = mat3x3( + vec3(1.60475, -0.53108, -0.07367), + vec3(-0.10208, 1.10813, -0.00605), + vec3(-0.00327, -0.07276, 1.07602) + ); + + fitted_color *= rgb_to_rrt; + + // Apply RRT and ODT + fitted_color = RRTAndODTFit(fitted_color); + + fitted_color *= odt_to_rgb; + + // Clamp to [0, 1] + fitted_color = saturate(fitted_color); + + return fitted_color; +} + +// ------------------------------- +// ------------- AgX ------------- +// ------------------------------- +// By Troy Sobotka +// https://github.com/MrLixm/AgXc +// https://github.com/sobotka/AgX + +/* + Increase color saturation of the given color data. + :param color: expected sRGB primaries input + :param saturationAmount: expected 0-1 range with 1=neutral, 0=no saturation. + -- ref[2] [4] +*/ +fn saturation(color: vec3, saturationAmount: f32) -> vec3 { + let luma = tonemapping_luminance(color); + return mix(vec3(luma), color, vec3(saturationAmount)); +} + +/* + Output log domain encoded data. + Similar to OCIO lg2 AllocationTransform. 
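+    Concretely: negatives are clamped away, values are log2-encoded relative
+    to 0.18 middle gray, then remapped from [minimum_ev, maximum_ev] to [0, 1].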
+ ref[0] +*/ +fn convertOpenDomainToNormalizedLog2_(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { + let in_midgray = 0.18; + + // remove negative before log transform + var normalized_color = max(vec3(0.0), color); + // avoid infinite issue with log -- ref[1] + normalized_color = select(normalized_color, 0.00001525878 + normalized_color, normalized_color < vec3(0.00003051757)); + normalized_color = clamp( + log2(normalized_color / in_midgray), + vec3(minimum_ev), + vec3(maximum_ev) + ); + let total_exposure = maximum_ev - minimum_ev; + + return (normalized_color - minimum_ev) / total_exposure; +} + +// Inverse of above +fn convertNormalizedLog2ToOpenDomain(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { + var open_color = color; + let in_midgray = 0.18; + let total_exposure = maximum_ev - minimum_ev; + + open_color = (open_color * total_exposure) + minimum_ev; + open_color = pow(vec3(2.0), open_color); + open_color = open_color * in_midgray; + + return open_color; +} + + +/*================= + Main processes +=================*/ + +// Prepare the data for display encoding. Converted to log domain. +fn applyAgXLog(Image: vec3) -> vec3 { + var prepared_image = max(vec3(0.0), Image); // clamp negatives + let r = dot(prepared_image, vec3(0.84247906, 0.0784336, 0.07922375)); + let g = dot(prepared_image, vec3(0.04232824, 0.87846864, 0.07916613)); + let b = dot(prepared_image, vec3(0.04237565, 0.0784336, 0.87914297)); + prepared_image = vec3(r, g, b); + + prepared_image = convertOpenDomainToNormalizedLog2_(prepared_image, -10.0, 6.5); + + prepared_image = clamp(prepared_image, vec3(0.0), vec3(1.0)); + return prepared_image; +} + +fn applyLUT3D(Image: vec3, block_size: f32) -> vec3 { + return sample_current_lut(Image * ((block_size - 1.0) / block_size) + 0.5 / block_size).rgb; +} + +// ------------------------- +// ------------------------- +// ------------------------- + +fn sample_blender_filmic_lut(stimulus: vec3) -> vec3 { + let block_size = 64.0; + let normalized = saturate(convertOpenDomainToNormalizedLog2_(stimulus, -11.0, 12.0)); + return applyLUT3D(normalized, block_size); +} + +// from https://64.github.io/tonemapping/ +// reinhard on RGB oversaturates colors +fn tonemapping_reinhard(color: vec3) -> vec3 { + return color / (1.0 + color); +} + +fn tonemapping_reinhard_extended(color: vec3, max_white: f32) -> vec3 { + let numerator = color * (1.0 + (color / vec3(max_white * max_white))); + return numerator / (1.0 + color); +} + +// luminance coefficients from Rec. 709. +// https://en.wikipedia.org/wiki/Rec._709 +fn tonemapping_luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} + +fn tonemapping_change_luminance(c_in: vec3, l_out: f32) -> vec3 { + let l_in = tonemapping_luminance(c_in); + return c_in * (l_out / l_in); +} + +fn tonemapping_reinhard_luminance(color: vec3) -> vec3 { + let l_old = tonemapping_luminance(color); + let l_new = l_old / (1.0 + l_old); + return tonemapping_change_luminance(color, l_new); +} + +fn rgb_to_srgb_simple(color: vec3) -> vec3 { + return pow(color, vec3(1.0 / 2.2)); +} + +// Source: Advanced VR Rendering, GDC 2015, Alex Vlachos, Valve, Slide 49 +// https://media.steampowered.com/apps/valve/2015/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf +fn screen_space_dither(frag_coord: vec2) -> vec3 { + var dither = vec3(dot(vec2(171.0, 231.0), frag_coord)).xxx; + dither = fract(dither.rgb / vec3(103.0, 71.0, 97.0)); + return (dither - 0.5) / 255.0; +} + +// Performs the "sectional" color grading: i.e. 
the color grading that applies +// individually to shadows, midtones, and highlights. +fn sectional_color_grading( + in: vec3, + color_grading: ptr, +) -> vec3 { + var color = in; + + // Determine whether the color is a shadow, midtone, or highlight. Colors + // close to the edges are considered a mix of both, to avoid sharp + // discontinuities. The formulas are taken from Blender's compositor. + + let level = (color.r + color.g + color.b) / 3.0; + + // Determine whether this color is a shadow, midtone, or highlight. If close + // to the cutoff points, blend between the two to avoid sharp color + // discontinuities. + var levels = vec3(0.0); + let midtone_range = (*color_grading).midtone_range; + if (level < midtone_range.x - LEVEL_MARGIN) { + levels.x = 1.0; + } else if (level < midtone_range.x + LEVEL_MARGIN) { + levels.y = ((level - midtone_range.x) * LEVEL_MARGIN_DIV) + 0.5; + levels.z = 1.0 - levels.y; + } else if (level < midtone_range.y - LEVEL_MARGIN) { + levels.y = 1.0; + } else if (level < midtone_range.y + LEVEL_MARGIN) { + levels.z = ((level - midtone_range.y) * LEVEL_MARGIN_DIV) + 0.5; + levels.y = 1.0 - levels.z; + } else { + levels.z = 1.0; + } + + // Calculate contrast/saturation/gamma/gain/lift. + let contrast = dot(levels, (*color_grading).contrast); + let saturation = dot(levels, (*color_grading).saturation); + let gamma = dot(levels, (*color_grading).gamma); + let gain = dot(levels, (*color_grading).gain); + let lift = dot(levels, (*color_grading).lift); + + // Adjust saturation and contrast. + let luma = tonemapping_luminance(color); + color = luma + saturation * (color - luma); + color = 0.5 + (color - 0.5) * contrast; + + // The [ASC CDL] formula for color correction. Given *i*, an input color, we + // have: + // + // out = (i × s + o)ⁿ + // + // Following the normal photographic naming convention, *gain* is the *s* + // factor, *lift* is the *o* term, and the inverse of *gamma* is the *n* + // exponent. + // + // [ASC CDL]: https://en.wikipedia.org/wiki/ASC_CDL#Combined_Function + color = powsafe(color * gain + lift, 1.0 / gamma); + + // Account for exposure. + color = color * powsafe(vec3(2.0), (*color_grading).exposure); + return max(color, vec3(0.0)); +} + +fn tone_mapping(in: vec4, in_color_grading: ColorGrading) -> vec4 { + var color = max(in.rgb, vec3(0.0)); + var color_grading = in_color_grading; // So we can take pointers to it. + + // Rotate hue if needed, by converting to and from HSV. Remember that hue is + // an angle, so it needs to be modulo 2π. +#ifdef HUE_ROTATE + var hsv = rgb_to_hsv(color); + hsv.r = (hsv.r + color_grading.hue) % PI_2; + color = hsv_to_rgb(hsv); +#endif + + // Perform white balance correction. Conveniently, this is a linear + // transform. The matrix was pre-calculated from the temperature and tint + // values on the CPU. +#ifdef WHITE_BALANCE + color = max(color_grading.balance * color, vec3(0.0)); +#endif + + // Perform the "sectional" color grading: i.e. the color grading that + // applies individually to shadows, midtones, and highlights. +#ifdef SECTIONAL_COLOR_GRADING + color = sectional_color_grading(color, &color_grading); +#else + // If we're not doing sectional color grading, the exposure might still need + // to be applied, for example when using auto exposure. 
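+    // (Exposure is expressed in stops, so the color is scaled by 2^exposure.)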
+ color = color * powsafe(vec3(2.0), color_grading.exposure); +#endif + + // tone_mapping +#ifdef TONEMAP_METHOD_NONE + color = color; +#else ifdef TONEMAP_METHOD_REINHARD + color = tonemapping_reinhard(color.rgb); +#else ifdef TONEMAP_METHOD_REINHARD_LUMINANCE + color = tonemapping_reinhard_luminance(color.rgb); +#else ifdef TONEMAP_METHOD_ACES_FITTED + color = ACESFitted(color.rgb); +#else ifdef TONEMAP_METHOD_AGX + color = applyAgXLog(color); + color = applyLUT3D(color, 32.0); +#else ifdef TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM + color = somewhat_boring_display_transform(color.rgb); +#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE + color = sample_tony_mc_mapface_lut(color); +#else ifdef TONEMAP_METHOD_BLENDER_FILMIC + color = sample_blender_filmic_lut(color.rgb); +#endif + + // Perceptual post tonemapping grading + color = saturation(color, color_grading.post_saturation); + + return vec4(color, in.a); +} + +// This is an **incredibly crude** approximation of the inverse of the tone mapping function. +// We assume here that there's a simple linear relationship between the input and output +// which is not true at all, but useful to at least preserve the overall luminance of colors +// when sampling from an already tonemapped image. (e.g. for transmissive materials when HDR is off) +fn approximate_inverse_tone_mapping(in: vec4, color_grading: ColorGrading) -> vec4 { + let out = tone_mapping(in, color_grading); + let approximate_ratio = length(in.rgb) / length(out.rgb); + return vec4(in.rgb * approximate_ratio, in.a); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/tonemapping/lut_bindings + +```rust +#define_import_path bevy_core_pipeline::tonemapping_lut_bindings + +@group(0) @binding(#TONEMAPPING_LUT_TEXTURE_BINDING_INDEX) var dt_lut_texture: texture_3d; +@group(0) @binding(#TONEMAPPING_LUT_SAMPLER_BINDING_INDEX) var dt_lut_sampler: sampler; + + +``` + +### bevy/crates/bevy_core_pipeline/src/tonemapping/tonemapping + +```rust +#define TONEMAPPING_PASS + +#import bevy_render::{ + view::View, + maths::powsafe, +} +#import bevy_core_pipeline::{ + fullscreen_vertex_shader::FullscreenVertexOutput, + tonemapping::{tone_mapping, screen_space_dither}, +} + +@group(0) @binding(0) var view: View; + +@group(0) @binding(1) var hdr_texture: texture_2d; +@group(0) @binding(2) var hdr_sampler: sampler; +@group(0) @binding(3) var dt_lut_texture: texture_3d; +@group(0) @binding(4) var dt_lut_sampler: sampler; + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + let hdr_color = textureSample(hdr_texture, hdr_sampler, in.uv); + + var output_rgb = tone_mapping(hdr_color, view.color_grading).rgb; + +#ifdef DEBAND_DITHER + output_rgb = powsafe(output_rgb.rgb, 1.0 / 2.2); + output_rgb = output_rgb + screen_space_dither(in.position.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. + output_rgb = powsafe(output_rgb.rgb, 2.2); +#endif + + return vec4(output_rgb, hdr_color.a); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/fullscreen + +```rust +#define_import_path bevy_core_pipeline::fullscreen_vertex_shader + +struct FullscreenVertexOutput { + @builtin(position) + position: vec4, + @location(0) + uv: vec2, +}; + +// This vertex shader produces the following, when drawn using indices 0..3: +// +// 1 | 0-----x.....2 +// 0 | | s | . 
´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | 1´ +// +--------------- +// -1 0 1 2 3 +// +// The axes are clip-space x and y. The region marked s is the visible region. +// The digits in the corners of the right-angled triangle are the vertex +// indices. +// +// The top-left has UV 0,0, the bottom-left has 0,2, and the top-right has 2,0. +// This means that the UV gets interpolated to 1,1 at the bottom-right corner +// of the clip-space rectangle that is at 1,-1 in clip space. +@vertex +fn fullscreen_vertex_shader(@builtin(vertex_index) vertex_index: u32) -> FullscreenVertexOutput { + // See the explanation above for how this works + let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; + let clip_position = vec4(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0), 0.0, 1.0); + + return FullscreenVertexOutput(clip_position, uv); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/smaa/smaa + +```rust +/** + * Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com) + * Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com) + * Copyright (C) 2013 Belen Masia (bmasia@unizar.es) + * Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com) + * Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to + * do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. As clarification, there + * is no requirement that the copyright notice and permission be included in + * binary distributions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * _______ ___ ___ ___ ___ + * / || \/ | / \ / \ + * | (---- | \ / | / ^ \ / ^ \ + * \ \ | |\/| | / /_\ \ / /_\ \ + * ----) | | | | | / _____ \ / _____ \ + * |_______/ |__| |__| /__/ \__\ /__/ \__\ + * + * E N H A N C E D + * S U B P I X E L M O R P H O L O G I C A L A N T I A L I A S I N G + * + * http://www.iryoku.com/smaa/ + * + * Hi, welcome aboard! + * + * Here you'll find instructions to get the shader up and running as fast as + * possible. + * + * IMPORTANTE NOTICE: when updating, remember to update both this file and the + * precomputed textures! They may change from version to version. + * + * The shader has three passes, chained together as follows: + * + * |input|------------------� + * v | + * [ SMAA*EdgeDetection ] | + * v | + * |edgesTex| | + * v | + * [ SMAABlendingWeightCalculation ] | + * v | + * |blendTex| | + * v | + * [ SMAANeighborhoodBlending ] <------� + * v + * |output| + * + * Note that each [pass] has its own vertex and pixel shader. Remember to use + * oversized triangles instead of quads to avoid overshading along the + * diagonal. 
+ * + * You've three edge detection methods to choose from: luma, color or depth. + * They represent different quality/performance and anti-aliasing/sharpness + * tradeoffs, so our recommendation is for you to choose the one that best + * suits your particular scenario: + * + * - Depth edge detection is usually the fastest but it may miss some edges. + * + * - Luma edge detection is usually more expensive than depth edge detection, + * but catches visible edges that depth edge detection can miss. + * + * - Color edge detection is usually the most expensive one but catches + * chroma-only edges. + * + * For quickstarters: just use luma edge detection. + * + * The general advice is to not rush the integration process and ensure each + * step is done correctly (don't try to integrate SMAA T2x with predicated edge + * detection from the start!). Ok then, let's go! + * + * 1. The first step is to create two RGBA temporal render targets for holding + * |edgesTex| and |blendTex|. + * + * In DX10 or DX11, you can use a RG render target for the edges texture. + * In the case of NVIDIA GPUs, using RG render targets seems to actually be + * slower. + * + * On the Xbox 360, you can use the same render target for resolving both + * |edgesTex| and |blendTex|, as they aren't needed simultaneously. + * + * 2. Both temporal render targets |edgesTex| and |blendTex| must be cleared + * each frame. Do not forget to clear the alpha channel! + * + * 3. The next step is loading the two supporting precalculated textures, + * 'areaTex' and 'searchTex'. You'll find them in the 'Textures' folder as + * C++ headers, and also as regular DDS files. They'll be needed for the + * 'SMAABlendingWeightCalculation' pass. + * + * If you use the C++ headers, be sure to load them in the format specified + * inside of them. + * + * You can also compress 'areaTex' and 'searchTex' using BC5 and BC4 + * respectively, if you have that option in your content processor pipeline. + * When compressing then, you get a non-perceptible quality decrease, and a + * marginal performance increase. + * + * 4. All samplers must be set to linear filtering and clamp. + * + * After you get the technique working, remember that 64-bit inputs have + * half-rate linear filtering on GCN. + * + * If SMAA is applied to 64-bit color buffers, switching to point filtering + * when accessing them will increase the performance. Search for + * 'SMAASamplePoint' to see which textures may benefit from point + * filtering, and where (which is basically the color input in the edge + * detection and resolve passes). + * + * 5. All texture reads and buffer writes must be non-sRGB, with the exception + * of the input read and the output write in + * 'SMAANeighborhoodBlending' (and only in this pass!). If sRGB reads in + * this last pass are not possible, the technique will work anyway, but + * will perform antialiasing in gamma space. + * + * IMPORTANT: for best results the input read for the color/luma edge + * detection should *NOT* be sRGB. + * + * 6. Before including SMAA.h you'll have to setup the render target metrics, + * the target and any optional configuration defines. Optionally you can + * use a preset. + * + * You have the following targets available: + * SMAA_HLSL_3 + * SMAA_HLSL_4 + * SMAA_HLSL_4_1 + * SMAA_GLSL_3 * + * SMAA_GLSL_4 * + * + * * (See SMAA_INCLUDE_VS and SMAA_INCLUDE_PS below). 
+ * + * And four presets: + * SMAA_PRESET_LOW (%60 of the quality) + * SMAA_PRESET_MEDIUM (%80 of the quality) + * SMAA_PRESET_HIGH (%95 of the quality) + * SMAA_PRESET_ULTRA (%99 of the quality) + * + * For example: + * #define SMAA_RT_METRICS float4(1.0 / 1280.0, 1.0 / 720.0, 1280.0, 720.0) + * #define SMAA_HLSL_4 + * #define SMAA_PRESET_HIGH + * #include "SMAA.h" + * + * Note that SMAA_RT_METRICS doesn't need to be a macro, it can be a + * uniform variable. The code is designed to minimize the impact of not + * using a constant value, but it is still better to hardcode it. + * + * Depending on how you encoded 'areaTex' and 'searchTex', you may have to + * add (and customize) the following defines before including SMAA.h: + * #define SMAA_AREATEX_SELECT(sample) sample.rg + * #define SMAA_SEARCHTEX_SELECT(sample) sample.r + * + * If your engine is already using porting macros, you can define + * SMAA_CUSTOM_SL, and define the porting functions by yourself. + * + * 7. Then, you'll have to setup the passes as indicated in the scheme above. + * You can take a look into SMAA.fx, to see how we did it for our demo. + * Checkout the function wrappers, you may want to copy-paste them! + * + * 8. It's recommended to validate the produced |edgesTex| and |blendTex|. + * You can use a screenshot from your engine to compare the |edgesTex| + * and |blendTex| produced inside of the engine with the results obtained + * with the reference demo. + * + * 9. After you get the last pass to work, it's time to optimize. You'll have + * to initialize a stencil buffer in the first pass (discard is already in + * the code), then mask execution by using it the second pass. The last + * pass should be executed in all pixels. + * + * + * After this point you can choose to enable predicated thresholding, + * temporal supersampling and motion blur integration: + * + * a) If you want to use predicated thresholding, take a look into + * SMAA_PREDICATION; you'll need to pass an extra texture in the edge + * detection pass. + * + * b) If you want to enable temporal supersampling (SMAA T2x): + * + * 1. The first step is to render using subpixel jitters. I won't go into + * detail, but it's as simple as moving each vertex position in the + * vertex shader, you can check how we do it in our DX10 demo. + * + * 2. Then, you must setup the temporal resolve. You may want to take a look + * into SMAAResolve for resolving 2x modes. After you get it working, you'll + * probably see ghosting everywhere. But fear not, you can enable the + * CryENGINE temporal reprojection by setting the SMAA_REPROJECTION macro. + * Check out SMAA_DECODE_VELOCITY if your velocity buffer is encoded. + * + * 3. The next step is to apply SMAA to each subpixel jittered frame, just as + * done for 1x. + * + * 4. At this point you should already have something usable, but for best + * results the proper area textures must be set depending on current jitter. + * For this, the parameter 'subsampleIndices' of + * 'SMAABlendingWeightCalculationPS' must be set as follows, for our T2x + * mode: + * + * @SUBSAMPLE_INDICES + * + * | S# | Camera Jitter | subsampleIndices | + * +----+------------------+---------------------+ + * | 0 | ( 0.25, -0.25) | float4(1, 1, 1, 0) | + * | 1 | (-0.25, 0.25) | float4(2, 2, 2, 0) | + * + * These jitter positions assume a bottom-to-top y axis. S# stands for the + * sample number. 
+ * + * More information about temporal supersampling here: + * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf + * + * c) If you want to enable spatial multisampling (SMAA S2x): + * + * 1. The scene must be rendered using MSAA 2x. The MSAA 2x buffer must be + * created with: + * - DX10: see below (*) + * - DX10.1: D3D10_STANDARD_MULTISAMPLE_PATTERN or + * - DX11: D3D11_STANDARD_MULTISAMPLE_PATTERN + * + * This allows to ensure that the subsample order matches the table in + * @SUBSAMPLE_INDICES. + * + * (*) In the case of DX10, we refer the reader to: + * - SMAA::detectMSAAOrder and + * - SMAA::msaaReorder + * + * These functions allow to match the standard multisample patterns by + * detecting the subsample order for a specific GPU, and reordering + * them appropriately. + * + * 2. A shader must be run to output each subsample into a separate buffer + * (DX10 is required). You can use SMAASeparate for this purpose, or just do + * it in an existing pass (for example, in the tone mapping pass, which has + * the advantage of feeding tone mapped subsamples to SMAA, which will yield + * better results). + * + * 3. The full SMAA 1x pipeline must be run for each separated buffer, storing + * the results in the final buffer. The second run should alpha blend with + * the existing final buffer using a blending factor of 0.5. + * 'subsampleIndices' must be adjusted as in the SMAA T2x case (see point + * b). + * + * d) If you want to enable temporal supersampling on top of SMAA S2x + * (which actually is SMAA 4x): + * + * 1. SMAA 4x consists on temporally jittering SMAA S2x, so the first step is + * to calculate SMAA S2x for current frame. In this case, 'subsampleIndices' + * must be set as follows: + * + * | F# | S# | Camera Jitter | Net Jitter | subsampleIndices | + * +----+----+--------------------+-------------------+----------------------+ + * | 0 | 0 | ( 0.125, 0.125) | ( 0.375, -0.125) | float4(5, 3, 1, 3) | + * | 0 | 1 | ( 0.125, 0.125) | (-0.125, 0.375) | float4(4, 6, 2, 3) | + * +----+----+--------------------+-------------------+----------------------+ + * | 1 | 2 | (-0.125, -0.125) | ( 0.125, -0.375) | float4(3, 5, 1, 4) | + * | 1 | 3 | (-0.125, -0.125) | (-0.375, 0.125) | float4(6, 4, 2, 4) | + * + * These jitter positions assume a bottom-to-top y axis. F# stands for the + * frame number. S# stands for the sample number. + * + * 2. After calculating SMAA S2x for current frame (with the new subsample + * indices), previous frame must be reprojected as in SMAA T2x mode (see + * point b). + * + * e) If motion blur is used, you may want to do the edge detection pass + * together with motion blur. This has two advantages: + * + * 1. Pixels under heavy motion can be omitted from the edge detection process. + * For these pixels we can just store "no edge", as motion blur will take + * care of them. + * 2. The center pixel tap is reused. + * + * Note that in this case depth testing should be used instead of stenciling, + * as we have to write all the pixels in the motion blur pass. + * + * That's it! 
+ */ + +struct SmaaInfo { + rt_metrics: vec4, +} + +struct VertexVaryings { + clip_coord: vec2, + tex_coord: vec2, +} + +struct EdgeDetectionVaryings { + @builtin(position) position: vec4, + @location(0) offset_0: vec4, + @location(1) offset_1: vec4, + @location(2) offset_2: vec4, + @location(3) tex_coord: vec2, +} + +struct BlendingWeightCalculationVaryings { + @builtin(position) position: vec4, + @location(0) offset_0: vec4, + @location(1) offset_1: vec4, + @location(2) offset_2: vec4, + @location(3) tex_coord: vec2, +} + +struct NeighborhoodBlendingVaryings { + @builtin(position) position: vec4, + @location(0) offset: vec4, + @location(1) tex_coord: vec2, +} + +@group(0) @binding(0) var color_texture: texture_2d; +@group(0) @binding(1) var smaa_info: SmaaInfo; + +#ifdef SMAA_EDGE_DETECTION +@group(1) @binding(0) var color_sampler: sampler; +#endif // SMAA_EDGE_DETECTION + +#ifdef SMAA_BLENDING_WEIGHT_CALCULATION +@group(1) @binding(0) var edges_texture: texture_2d; +@group(1) @binding(1) var edges_sampler: sampler; +@group(1) @binding(2) var search_texture: texture_2d; +@group(1) @binding(3) var area_texture: texture_2d; +#endif // SMAA_BLENDING_WEIGHT_CALCULATION + +#ifdef SMAA_NEIGHBORHOOD_BLENDING +@group(1) @binding(0) var blend_texture: texture_2d; +@group(1) @binding(1) var blend_sampler: sampler; +#endif // SMAA_NEIGHBORHOOD_BLENDING + +//----------------------------------------------------------------------------- +// SMAA Presets + +#ifdef SMAA_PRESET_LOW +const SMAA_THRESHOLD: f32 = 0.15; +const SMAA_MAX_SEARCH_STEPS: u32 = 4u; +#define SMAA_DISABLE_DIAG_DETECTION +#define SMAA_DISABLE_CORNER_DETECTION +#else ifdef SMAA_PRESET_MEDIUM // SMAA_PRESET_LOW +const SMAA_THRESHOLD: f32 = 0.1; +const SMAA_MAX_SEARCH_STEPS: u32 = 8u; +#define SMAA_DISABLE_DIAG_DETECTION +#define SMAA_DISABLE_CORNER_DETECTION +#else ifdef SMAA_PRESET_HIGH // SMAA_PRESET_MEDIUM +const SMAA_THRESHOLD: f32 = 0.1; +const SMAA_MAX_SEARCH_STEPS: u32 = 16u; +const SMAA_MAX_SEARCH_STEPS_DIAG: u32 = 8u; +const SMAA_CORNER_ROUNDING: u32 = 25u; +#else ifdef SMAA_PRESET_ULTRA // SMAA_PRESET_HIGH +const SMAA_THRESHOLD: f32 = 0.05; +const SMAA_MAX_SEARCH_STEPS: u32 = 32u; +const SMAA_MAX_SEARCH_STEPS_DIAG: u32 = 16u; +const SMAA_CORNER_ROUNDING: u32 = 25u; +#else // SMAA_PRESET_ULTRA +const SMAA_THRESHOLD: f32 = 0.1; +const SMAA_MAX_SEARCH_STEPS: u32 = 16u; +const SMAA_MAX_SEARCH_STEPS_DIAG: u32 = 8u; +const SMAA_CORNER_ROUNDING: u32 = 25u; +#endif // SMAA_PRESET_ULTRA + +//----------------------------------------------------------------------------- +// Configurable Defines + +/** + * SMAA_THRESHOLD specifies the threshold or sensitivity to edges. + * Lowering this value you will be able to detect more edges at the expense of + * performance. + * + * Range: [0, 0.5] + * 0.1 is a reasonable value, and allows to catch most visible edges. + * 0.05 is a rather overkill value, that allows to catch 'em all. + * + * If temporal supersampling is used, 0.2 could be a reasonable value, as low + * contrast edges are properly filtered by just 2x. + */ +// (In the WGSL version of this shader, `SMAA_THRESHOLD` is set above, in "SMAA +// Presets".) + +/** + * SMAA_MAX_SEARCH_STEPS specifies the maximum steps performed in the + * horizontal/vertical pattern searches, at each side of the pixel. + * + * In number of pixels, it's actually the double. So the maximum line length + * perfectly handled by, for example 16, is 64 (by perfectly, we meant that + * longer lines won't look as good, but still antialiased). 
+ * + * Range: [0, 112] + */ +// (In the WGSL version of this shader, `SMAA_MAX_SEARCH_STEPS` is set above, in +// "SMAA Presets".) + +/** + * SMAA_MAX_SEARCH_STEPS_DIAG specifies the maximum steps performed in the + * diagonal pattern searches, at each side of the pixel. In this case we jump + * one pixel at time, instead of two. + * + * Range: [0, 20] + * + * On high-end machines it is cheap (between a 0.8x and 0.9x slower for 16 + * steps), but it can have a significant impact on older machines. + * + * Define SMAA_DISABLE_DIAG_DETECTION to disable diagonal processing. + */ +// (In the WGSL version of this shader, `SMAA_MAX_SEARCH_STEPS_DIAG` is set +// above, in "SMAA Presets".) + +/** + * SMAA_CORNER_ROUNDING specifies how much sharp corners will be rounded. + * + * Range: [0, 100] + * + * Define SMAA_DISABLE_CORNER_DETECTION to disable corner processing. + */ +// (In the WGSL version of this shader, `SMAA_CORNER_ROUNDING` is set above, in +// "SMAA Presets".) + +/** + * If there is an neighbor edge that has SMAA_LOCAL_CONTRAST_FACTOR times + * bigger contrast than current edge, current edge will be discarded. + * + * This allows to eliminate spurious crossing edges, and is based on the fact + * that, if there is too much contrast in a direction, that will hide + * perceptually contrast in the other neighbors. + */ +const SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR: f32 = 2.0; + +//----------------------------------------------------------------------------- +// Non-Configurable Defines + +const SMAA_AREATEX_MAX_DISTANCE: f32 = 16.0; +const SMAA_AREATEX_MAX_DISTANCE_DIAG: f32 = 20.0; +const SMAA_AREATEX_PIXEL_SIZE: vec2 = (1.0 / vec2(160.0, 560.0)); +const SMAA_AREATEX_SUBTEX_SIZE: f32 = (1.0 / 7.0); +const SMAA_SEARCHTEX_SIZE: vec2 = vec2(66.0, 33.0); +const SMAA_SEARCHTEX_PACKED_SIZE: vec2 = vec2(64.0, 16.0); + +#ifndef SMAA_DISABLE_CORNER_DETECTION +const SMAA_CORNER_ROUNDING_NORM: f32 = f32(SMAA_CORNER_ROUNDING) / 100.0; +#endif // SMAA_DISABLE_CORNER_DETECTION + +//----------------------------------------------------------------------------- +// WGSL-Specific Functions + +// This vertex shader produces the following, when drawn using indices 0..3: +// +// 1 | 0-----x.....2 +// 0 | | s | . ´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | 1´ +// +--------------- +// -1 0 1 2 3 +// +// The axes are clip-space x and y. The region marked s is the visible region. +// The digits in the corners of the right-angled triangle are the vertex +// indices. +// +// The top-left has UV 0,0, the bottom-left has 0,2, and the top-right has 2,0. +// This means that the UV gets interpolated to 1,1 at the bottom-right corner +// of the clip-space rectangle that is at 1,-1 in clip space. 
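+// For example, vertex_index 2 yields uv = (2, 0), which lands at clip-space
+// (3, 1): the rightmost corner of the oversized triangle in the diagram above.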
+fn calculate_vertex_varyings(vertex_index: u32) -> VertexVaryings { + // See the explanation above for how this works + let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; + let clip_position = vec2(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0)); + + return VertexVaryings(clip_position, uv); +} + +//----------------------------------------------------------------------------- +// Vertex Shaders + +#ifdef SMAA_EDGE_DETECTION + +/** + * Edge Detection Vertex Shader + */ +@vertex +fn edge_detection_vertex_main(@builtin(vertex_index) vertex_index: u32) -> EdgeDetectionVaryings { + let varyings = calculate_vertex_varyings(vertex_index); + + var edge_detection_varyings = EdgeDetectionVaryings(); + edge_detection_varyings.position = vec4(varyings.clip_coord, 0.0, 1.0); + edge_detection_varyings.tex_coord = varyings.tex_coord; + + edge_detection_varyings.offset_0 = smaa_info.rt_metrics.xyxy * vec4(-1.0, 0.0, 0.0, -1.0) + + varyings.tex_coord.xyxy; + edge_detection_varyings.offset_1 = smaa_info.rt_metrics.xyxy * vec4(1.0, 0.0, 0.0, 1.0) + + varyings.tex_coord.xyxy; + edge_detection_varyings.offset_2 = smaa_info.rt_metrics.xyxy * vec4(-2.0, 0.0, 0.0, -2.0) + + varyings.tex_coord.xyxy; + + return edge_detection_varyings; +} + +#endif // SMAA_EDGE_DETECTION + +#ifdef SMAA_BLENDING_WEIGHT_CALCULATION + +/** + * Blend Weight Calculation Vertex Shader + */ +@vertex +fn blending_weight_calculation_vertex_main(@builtin(vertex_index) vertex_index: u32) + -> BlendingWeightCalculationVaryings { + let varyings = calculate_vertex_varyings(vertex_index); + + var weight_varyings = BlendingWeightCalculationVaryings(); + weight_varyings.position = vec4(varyings.clip_coord, 0.0, 1.0); + weight_varyings.tex_coord = varyings.tex_coord; + + // We will use these offsets for the searches later on (see @PSEUDO_GATHER4): + weight_varyings.offset_0 = smaa_info.rt_metrics.xyxy * vec4(-0.25, -0.125, 1.25, -0.125) + + varyings.tex_coord.xyxy; + weight_varyings.offset_1 = smaa_info.rt_metrics.xyxy * vec4(-0.125, -0.25, -0.125, 1.25) + + varyings.tex_coord.xyxy; + + // And these for the searches, they indicate the ends of the loops: + weight_varyings.offset_2 = + smaa_info.rt_metrics.xxyy * vec4(-2.0, 2.0, -2.0, 2.0) * f32(SMAA_MAX_SEARCH_STEPS) + + vec4(weight_varyings.offset_0.xz, weight_varyings.offset_1.yw); + + return weight_varyings; +} + +#endif // SMAA_BLENDING_WEIGHT_CALCULATION + +#ifdef SMAA_NEIGHBORHOOD_BLENDING + +/** + * Neighborhood Blending Vertex Shader + */ +@vertex +fn neighborhood_blending_vertex_main(@builtin(vertex_index) vertex_index: u32) + -> NeighborhoodBlendingVaryings { + let varyings = calculate_vertex_varyings(vertex_index); + let offset = smaa_info.rt_metrics.xyxy * vec4(1.0, 0.0, 0.0, 1.0) + varyings.tex_coord.xyxy; + return NeighborhoodBlendingVaryings( + vec4(varyings.clip_coord, 0.0, 1.0), + offset, + varyings.tex_coord + ); +} + +#endif // SMAA_NEIGHBORHOOD_BLENDING + +//----------------------------------------------------------------------------- +// Edge Detection Pixel Shaders (First Pass) + +#ifdef SMAA_EDGE_DETECTION + +/** + * Luma Edge Detection + * + * IMPORTANT NOTICE: luma edge detection requires gamma-corrected colors, and + * thus 'color_texture' should be a non-sRGB texture. + */ +@fragment +fn luma_edge_detection_fragment_main(in: EdgeDetectionVaryings) -> @location(0) vec4 { + // Calculate the threshold: + // TODO: Predication. 
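+    // (In the reference HLSL implementation, predication scales this threshold
+    // per pixel using an extra predication texture, e.g. depth; this port uses
+    // the constant SMAA_THRESHOLD instead.)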
+ let threshold = vec2(SMAA_THRESHOLD); + + // Calculate luma: + let weights = vec3(0.2126, 0.7152, 0.0722); + let L = dot(textureSample(color_texture, color_sampler, in.tex_coord).rgb, weights); + + let Lleft = dot(textureSample(color_texture, color_sampler, in.offset_0.xy).rgb, weights); + let Ltop = dot(textureSample(color_texture, color_sampler, in.offset_0.zw).rgb, weights); + + // We do the usual threshold: + var delta: vec4 = vec4(abs(L - vec2(Lleft, Ltop)), 0.0, 0.0); + var edges = step(threshold, delta.xy); + + // Then discard if there is no edge: + if (dot(edges, vec2(1.0)) == 0.0) { + discard; + } + + // Calculate right and bottom deltas: + let Lright = dot(textureSample(color_texture, color_sampler, in.offset_1.xy).rgb, weights); + let Lbottom = dot(textureSample(color_texture, color_sampler, in.offset_1.zw).rgb, weights); + delta = vec4(delta.xy, abs(L - vec2(Lright, Lbottom))); + + // Calculate the maximum delta in the direct neighborhood: + var max_delta = max(delta.xy, delta.zw); + + // Calculate left-left and top-top deltas: + let Lleftleft = dot(textureSample(color_texture, color_sampler, in.offset_2.xy).rgb, weights); + let Ltoptop = dot(textureSample(color_texture, color_sampler, in.offset_2.zw).rgb, weights); + delta = vec4(delta.xy, abs(vec2(Lleft, Ltop) - vec2(Lleftleft, Ltoptop))); + + // Calculate the final maximum delta: + max_delta = max(max_delta.xy, delta.zw); + let final_delta = max(max_delta.x, max_delta.y); + + // Local contrast adaptation: + edges *= step(vec2(final_delta), SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy); + + return vec4(edges, 0.0, 1.0); +} + +#endif // SMAA_EDGE_DETECTION + +#ifdef SMAA_BLENDING_WEIGHT_CALCULATION + +//----------------------------------------------------------------------------- +// Diagonal Search Functions + +#ifndef SMAA_DISABLE_DIAG_DETECTION + +/** + * Allows to decode two binary values from a bilinear-filtered access. + */ +fn decode_diag_bilinear_access_2(in_e: vec2) -> vec2 { + // Bilinear access for fetching 'e' have a 0.25 offset, and we are + // interested in the R and G edges: + // + // +---G---+-------+ + // | x o R x | + // +-------+-------+ + // + // Then, if one of these edge is enabled: + // Red: (0.75 * X + 0.25 * 1) => 0.25 or 1.0 + // Green: (0.75 * 1 + 0.25 * X) => 0.75 or 1.0 + // + // This function will unpack the values (mad + mul + round): + // wolframalpha.com: round(x * abs(5 * x - 5 * 0.75)) plot 0 to 1 + var e = in_e; + e.r = e.r * abs(5.0 * e.r - 5.0 * 0.75); + return round(e); +} + +fn decode_diag_bilinear_access_4(e: vec4) -> vec4 { + let e_rb = e.rb * abs(5.0 * e.rb - 5.0 * 0.75); + return round(vec4(e_rb.x, e.g, e_rb.y, e.a)); +} + +/** + * These functions allows to perform diagonal pattern searches. 
+ */
+fn search_diag_1(tex_coord: vec2<f32>, dir: vec2<f32>, e: ptr<function, vec2<f32>>) -> vec2<f32> {
+    var coord = vec4(tex_coord, -1.0, 1.0);
+    let t = vec3(smaa_info.rt_metrics.xy, 1.0);
+    while (coord.z < f32(SMAA_MAX_SEARCH_STEPS_DIAG - 1u) && coord.w > 0.9) {
+        coord = vec4(t * vec3(dir, 1.0) + coord.xyz, coord.w);
+        *e = textureSampleLevel(edges_texture, edges_sampler, coord.xy, 0.0).rg;
+        coord.w = dot(*e, vec2(0.5));
+    }
+    return coord.zw;
+}
+
+fn search_diag_2(tex_coord: vec2<f32>, dir: vec2<f32>, e: ptr<function, vec2<f32>>) -> vec2<f32> {
+    var coord = vec4(tex_coord, -1.0, 1.0);
+    coord.x += 0.25 * smaa_info.rt_metrics.x; // See @SearchDiag2Optimization
+    let t = vec3(smaa_info.rt_metrics.xy, 1.0);
+    while (coord.z < f32(SMAA_MAX_SEARCH_STEPS_DIAG - 1u) && coord.w > 0.9) {
+        coord = vec4(t * vec3(dir, 1.0) + coord.xyz, coord.w);
+
+        // @SearchDiag2Optimization
+        // Fetch both edges at once using bilinear filtering:
+        *e = textureSampleLevel(edges_texture, edges_sampler, coord.xy, 0.0).rg;
+        *e = decode_diag_bilinear_access_2(*e);
+
+        // Non-optimized version:
+        // e.g = SMAASampleLevelZero(edgesTex, coord.xy).g;
+        // e.r = SMAASampleLevelZeroOffset(edgesTex, coord.xy, int2(1, 0)).r;
+
+        coord.w = dot(*e, vec2(0.5));
+    }
+    return coord.zw;
+}
+
+/**
+ * Similar to SMAAArea, this calculates the area corresponding to a certain
+ * diagonal distance and crossing edges 'e'.
+ */
+fn area_diag(dist: vec2<f32>, e: vec2<f32>, offset: f32) -> vec2<f32> {
+    var tex_coord = vec2(SMAA_AREATEX_MAX_DISTANCE_DIAG) * e + dist;
+
+    // We do a scale and bias for mapping to texel space:
+    tex_coord = SMAA_AREATEX_PIXEL_SIZE * tex_coord + 0.5 * SMAA_AREATEX_PIXEL_SIZE;
+
+    // Diagonal areas are on the second half of the texture:
+    tex_coord.x += 0.5;
+
+    // Move to proper place, according to the subpixel offset:
+    tex_coord.y += SMAA_AREATEX_SUBTEX_SIZE * offset;
+
+    // Do it!
+    return textureSampleLevel(area_texture, edges_sampler, tex_coord, 0.0).rg;
+}
+
+/**
+ * This searches for diagonal patterns and returns the corresponding weights.
+ */ +fn calculate_diag_weights(tex_coord: vec2, e: vec2, subsample_indices: vec4) + -> vec2 { + var weights = vec2(0.0, 0.0); + + // Search for the line ends: + var d = vec4(0.0); + var end = vec2(0.0); + if (e.r > 0.0) { + let d_xz = search_diag_1(tex_coord, vec2(-1.0, 1.0), &end); + d = vec4(d_xz.x, d.y, d_xz.y, d.w); + d.x += f32(end.y > 0.9); + } else { + d = vec4(0.0, d.y, 0.0, d.w); + } + let d_yw = search_diag_1(tex_coord, vec2(1.0, -1.0), &end); + d = vec4(d.x, d_yw.x, d.y, d_yw.y); + + if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3 + // Fetch the crossing edges: + let coords = vec4(-d.x + 0.25, d.x, d.y, -d.y - 0.25) * smaa_info.rt_metrics.xyxy + + tex_coord.xyxy; + var c = vec4( + textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0, vec2(-1, 0)).rg, + textureSampleLevel(edges_texture, edges_sampler, coords.zw, 0.0, vec2( 1, 0)).rg, + ); + let c_yxwz = decode_diag_bilinear_access_4(c.xyzw); + c = c_yxwz.yxwz; + + // Non-optimized version: + // float4 coords = mad(float4(-d.x, d.x, d.y, -d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy); + // float4 c; + // c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g; + // c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, 0)).r; + // c.z = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).g; + // c.w = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, -1)).r; + + // Merge crossing edges at each side into a single value: + var cc = vec2(2.0) * c.xz + c.yw; + + // Remove the crossing edge if we didn't found the end of the line: + cc = select(cc, vec2(0.0, 0.0), vec2(step(vec2(0.9), d.zw))); + + // Fetch the areas for this line: + weights += area_diag(d.xy, cc, subsample_indices.z); + } + + // Search for the line ends: + let d_xz = search_diag_2(tex_coord, vec2(-1.0, -1.0), &end); + if (textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0, vec2(1, 0)).r > 0.0) { + let d_yw = search_diag_2(tex_coord, vec2(1.0, 1.0), &end); + d = vec4(d.x, d_yw.x, d.z, d_yw.y); + d.y += f32(end.y > 0.9); + } else { + d = vec4(d.x, 0.0, d.z, 0.0); + } + + if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3 + // Fetch the crossing edges: + let coords = vec4(-d.x, -d.x, d.y, d.y) * smaa_info.rt_metrics.xyxy + tex_coord.xyxy; + let c = vec4( + textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0, vec2(-1, 0)).g, + textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0, vec2( 0, -1)).r, + textureSampleLevel(edges_texture, edges_sampler, coords.zw, 0.0, vec2( 1, 0)).gr, + ); + var cc = vec2(2.0) * c.xz + c.yw; + + // Remove the crossing edge if we didn't found the end of the line: + cc = select(cc, vec2(0.0, 0.0), vec2(step(vec2(0.9), d.zw))); + + // Fetch the areas for this line: + weights += area_diag(d.xy, cc, subsample_indices.w).gr; + } + + return weights; +} + +#endif // SMAA_DISABLE_DIAG_DETECTION + +//----------------------------------------------------------------------------- +// Horizontal/Vertical Search Functions + +/** + * This allows to determine how much length should we add in the last step + * of the searches. It takes the bilinearly interpolated edge (see + * @PSEUDO_GATHER4), and adds 0, 1 or 2, depending on which edges and + * crossing edges are active. 
+ */ +fn search_length(e: vec2, offset: f32) -> f32 { + // The texture is flipped vertically, with left and right cases taking half + // of the space horizontally: + var scale = SMAA_SEARCHTEX_SIZE * vec2(0.5, -1.0); + var bias = SMAA_SEARCHTEX_SIZE * vec2(offset, 1.0); + + // Scale and bias to access texel centers: + scale += vec2(-1.0, 1.0); + bias += vec2( 0.5, -0.5); + + // Convert from pixel coordinates to texcoords: + // (We use SMAA_SEARCHTEX_PACKED_SIZE because the texture is cropped) + scale *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE; + bias *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE; + + // Lookup the search texture: + return textureSampleLevel(search_texture, edges_sampler, scale * e + bias, 0.0).r; +} + +/** + * Horizontal/vertical search functions for the 2nd pass. + */ +fn search_x_left(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + /** + * @PSEUDO_GATHER4 + * This texcoord has been offset by (-0.25, -0.125) in the vertex shader to + * sample between edge, thus fetching four edges in a row. + * Sampling with different offsets in each direction allows to disambiguate + * which edges are active from the four fetched ones. + */ + var e = vec2(0.0, 1.0); + while (tex_coord.x > end && + e.g > 0.8281 && // Is there some edge not activated? + e.r == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += -vec2(2.0, 0.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e, 0.0) + 3.25; + return smaa_info.rt_metrics.x * offset + tex_coord.x; +} + +fn search_x_right(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + var e = vec2(0.0, 1.0); + while (tex_coord.x < end && + e.g > 0.8281 && // Is there some edge not activated? + e.r == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += vec2(2.0, 0.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e, 0.5) + 3.25; + return -smaa_info.rt_metrics.x * offset + tex_coord.x; +} + +fn search_y_up(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + var e = vec2(1.0, 0.0); + while (tex_coord.y > end && + e.r > 0.8281 && // Is there some edge not activated? + e.g == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += -vec2(0.0, 2.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e.gr, 0.0) + 3.25; + return smaa_info.rt_metrics.y * offset + tex_coord.y; +} + +fn search_y_down(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + var e = vec2(1.0, 0.0); + while (tex_coord.y < end && + e.r > 0.8281 && // Is there some edge not activated? + e.g == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += vec2(0.0, 2.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e.gr, 0.5) + 3.25; + return -smaa_info.rt_metrics.y * offset + tex_coord.y; +} + +/** + * Ok, we have the distance and both crossing edges. So, what are the areas + * at each side of current edge? 
+ */ +fn area(dist: vec2, e1: f32, e2: f32, offset: f32) -> vec2 { + // Rounding prevents precision errors of bilinear filtering: + var tex_coord = SMAA_AREATEX_MAX_DISTANCE * round(4.0 * vec2(e1, e2)) + dist; + + // We do a scale and bias for mapping to texel space: + tex_coord = SMAA_AREATEX_PIXEL_SIZE * tex_coord + 0.5 * SMAA_AREATEX_PIXEL_SIZE; + + // Move to proper place, according to the subpixel offset: + tex_coord.y += SMAA_AREATEX_SUBTEX_SIZE * offset; + + // Do it! + return textureSample(area_texture, edges_sampler, tex_coord).rg; +} + +//----------------------------------------------------------------------------- +// Corner Detection Functions + +fn detect_horizontal_corner_pattern(weights: vec2, tex_coord: vec4, d: vec2) + -> vec2 { +#ifndef SMAA_DISABLE_CORNER_DETECTION + let left_right = step(d.xy, d.yx); + var rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * left_right; + + rounding /= left_right.x + left_right.y; // Reduce blending for pixels in the center of a line. + + var factor = vec2(1.0, 1.0); + factor.x -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2(0, 1)).r; + factor.x -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2(1, 1)).r; + factor.y -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2(0, -2)).r; + factor.y -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2(1, -2)).r; + + return weights * saturate(factor); +#else // SMAA_DISABLE_CORNER_DETECTION + return weights; +#endif // SMAA_DISABLE_CORNER_DETECTION +} + +fn detect_vertical_corner_pattern(weights: vec2, tex_coord: vec4, d: vec2) + -> vec2 { +#ifndef SMAA_DISABLE_CORNER_DETECTION + let left_right = step(d.xy, d.yx); + var rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * left_right; + + rounding /= left_right.x + left_right.y; + + var factor = vec2(1.0, 1.0); + factor.x -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2( 1, 0)).g; + factor.x -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2( 1, 1)).g; + factor.y -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2(-2, 0)).g; + factor.y -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2(-2, 1)).g; + + return weights * saturate(factor); +#else // SMAA_DISABLE_CORNER_DETECTION + return weights; +#endif // SMAA_DISABLE_CORNER_DETECTION +} + +//----------------------------------------------------------------------------- +// Blending Weight Calculation Pixel Shader (Second Pass) + +@fragment +fn blending_weight_calculation_fragment_main(in: BlendingWeightCalculationVaryings) + -> @location(0) vec4 { + let subsample_indices = vec4(0.0); // Just pass zero for SMAA 1x, see @SUBSAMPLE_INDICES. + + var weights = vec4(0.0); + + var e = textureSample(edges_texture, edges_sampler, in.tex_coord).rg; + + if (e.g > 0.0) { // Edge at north +#ifndef SMAA_DISABLE_DIAG_DETECTION + // Diagonals have both north and west edges, so searching for them in + // one of the boundaries is enough. + weights = vec4(calculate_diag_weights(in.tex_coord, e, subsample_indices), weights.ba); + + // We give priority to diagonals, so if we find a diagonal we skip + // horizontal/vertical processing. 
+ if (weights.r + weights.g != 0.0) { + return weights; + } +#endif // SMAA_DISABLE_DIAG_DETECTION + + var d: vec2; + + // Find the distance to the left: + var coords: vec3; + coords.x = search_x_left(in.offset_0.xy, in.offset_2.x); + // in.offset_1.y = in.tex_coord.y - 0.25 * smaa_info.rt_metrics.y (@CROSSING_OFFSET) + coords.y = in.offset_1.y; + d.x = coords.x; + + // Now fetch the left crossing edges, two at a time using bilinear + // filtering. Sampling at -0.25 (see @CROSSING_OFFSET) enables to + // discern what value each edge has: + let e1 = textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0).r; + + // Find the distance to the right: + coords.z = search_x_right(in.offset_0.zw, in.offset_2.y); + d.y = coords.z; + + // We want the distances to be in pixel units (doing this here allow to + // better interleave arithmetic and memory accesses): + d = abs(round(smaa_info.rt_metrics.zz * d - in.position.xx)); + + // SMAAArea below needs a sqrt, as the areas texture is compressed + // quadratically: + let sqrt_d = sqrt(d); + + // Fetch the right crossing edges: + let e2 = textureSampleLevel( + edges_texture, edges_sampler, coords.zy, 0.0, vec2(1, 0)).r; + + // Ok, we know how this pattern looks like, now it is time for getting + // the actual area: + weights = vec4(area(sqrt_d, e1, e2, subsample_indices.y), weights.ba); + + // Fix corners: + coords.y = in.tex_coord.y; + weights = vec4( + detect_horizontal_corner_pattern(weights.rg, coords.xyzy, d), + weights.ba + ); + } + + if (e.r > 0.0) { // Edge at west + var d: vec2; + + // Find the distance to the top: + var coords: vec3; + coords.y = search_y_up(in.offset_1.xy, in.offset_2.z); + // in.offset_1.x = in.tex_coord.x - 0.25 * smaa_info.rt_metrics.x + coords.x = in.offset_0.x; + d.x = coords.y; + + // Fetch the top crossing edges: + let e1 = textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0).g; + + // Find the distance to the bottom: + coords.z = search_y_down(in.offset_1.zw, in.offset_2.w); + d.y = coords.z; + + // We want the distances to be in pixel units: + d = abs(round(smaa_info.rt_metrics.ww * d - in.position.yy)); + + // SMAAArea below needs a sqrt, as the areas texture is compressed + // quadratically: + let sqrt_d = sqrt(d); + + // Fetch the bottom crossing edges: + let e2 = textureSampleLevel( + edges_texture, edges_sampler, coords.xz, 0.0, vec2(0, 1)).g; + + // Get the area for this direction: + weights = vec4(weights.rg, area(sqrt_d, e1, e2, subsample_indices.x)); + + // Fix corners: + coords.x = in.tex_coord.x; + weights = vec4(weights.rg, detect_vertical_corner_pattern(weights.ba, coords.xyxz, d)); + } + + return weights; +} + +#endif // SMAA_BLENDING_WEIGHT_CALCULATION + +#ifdef SMAA_NEIGHBORHOOD_BLENDING + +//----------------------------------------------------------------------------- +// Neighborhood Blending Pixel Shader (Third Pass) + +@fragment +fn neighborhood_blending_fragment_main(in: NeighborhoodBlendingVaryings) -> @location(0) vec4 { + // Fetch the blending weights for current pixel: + let a = vec4( + textureSample(blend_texture, blend_sampler, in.offset.xy).a, // Right + textureSample(blend_texture, blend_sampler, in.offset.zw).g, // Top + textureSample(blend_texture, blend_sampler, in.tex_coord).zx, // Bottom / Left + ); + + // Is there any blending weight with a value greater than 0.0? 
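+    // (If not, the pixel needs no blending and the source color is passed
+    // through unchanged; otherwise we blend with the neighbor chosen below.)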
+ if (dot(a, vec4(1.0)) < 1.0e-5) { + let color = textureSampleLevel(color_texture, blend_sampler, in.tex_coord, 0.0); + // TODO: Reprojection + return color; + } else { + let h = max(a.x, a.z) > max(a.y, a.w); // max(horizontal) > max(vertical) + + // Calculate the blending offsets: + var blending_offset = vec4(0.0, a.y, 0.0, a.w); + var blending_weight = a.yw; + blending_offset = select(blending_offset, vec4(a.x, 0.0, a.z, 0.0), h); + blending_weight = select(blending_weight, a.xz, h); + blending_weight /= dot(blending_weight, vec2(1.0)); + + // Calculate the texture coordinates: + let blending_coord = + blending_offset * vec4(smaa_info.rt_metrics.xy, -smaa_info.rt_metrics.xy) + + in.tex_coord.xyxy; + + // We exploit bilinear filtering to mix current pixel with the chosen + // neighbor: + var color = blending_weight.x * + textureSampleLevel(color_texture, blend_sampler, blending_coord.xy, 0.0); + color += blending_weight.y * + textureSampleLevel(color_texture, blend_sampler, blending_coord.zw, 0.0); + + // TODO: Reprojection + + return color; + } +} + +#endif // SMAA_NEIGHBORHOOD_BLENDING + +``` + +### bevy/crates/bevy_core_pipeline/src/auto_exposure/auto_exposure + +```rust +// Auto exposure +// +// This shader computes an auto exposure value for the current frame, +// which is then used as an exposure correction in the tone mapping shader. +// +// The auto exposure value is computed in two passes: +// * The compute_histogram pass calculates a histogram of the luminance values in the scene, +// taking into account the metering mask texture. The metering mask is a grayscale texture +// that defines the areas of the screen that should be given more weight when calculating +// the average luminance value. For example, the middle area of the screen might be more important +// than the edges. +// * The compute_average pass calculates the average luminance value of the scene, taking +// into account the low_percent and high_percent settings. These settings define the +// percentage of the histogram that should be excluded when calculating the average. This +// is useful to avoid overexposure when you have a lot of shadows, or underexposure when you +// have a lot of bright specular reflections. +// +// The final target_exposure is finally used to smoothly adjust the exposure value over time. + +#import bevy_render::view::View +#import bevy_render::globals::Globals + +// Constant to convert RGB to luminance, taken from Real Time Rendering, Vol 4 pg. 
278, 4th edition +const RGB_TO_LUM = vec3(0.2125, 0.7154, 0.0721); + +struct AutoExposure { + min_log_lum: f32, + inv_log_lum_range: f32, + log_lum_range: f32, + low_percent: f32, + high_percent: f32, + speed_up: f32, + speed_down: f32, + exponential_transition_distance: f32, +} + +struct CompensationCurve { + min_log_lum: f32, + inv_log_lum_range: f32, + min_compensation: f32, + compensation_range: f32, +} + +@group(0) @binding(0) var globals: Globals; + +@group(0) @binding(1) var settings: AutoExposure; + +@group(0) @binding(2) var tex_color: texture_2d; + +@group(0) @binding(3) var tex_mask: texture_2d; + +@group(0) @binding(4) var tex_compensation: texture_1d; + +@group(0) @binding(5) var compensation_curve: CompensationCurve; + +@group(0) @binding(6) var histogram: array, 64>; + +@group(0) @binding(7) var exposure: f32; + +@group(0) @binding(8) var view: View; + +var histogram_shared: array, 64>; + +// For a given color, return the histogram bin index +fn color_to_bin(hdr: vec3) -> u32 { + // Convert color to luminance + let lum = dot(hdr, RGB_TO_LUM); + + if lum < exp2(settings.min_log_lum) { + return 0u; + } + + // Calculate the log_2 luminance and express it as a value in [0.0, 1.0] + // where 0.0 represents the minimum luminance, and 1.0 represents the max. + let log_lum = saturate((log2(lum) - settings.min_log_lum) * settings.inv_log_lum_range); + + // Map [0, 1] to [1, 63]. The zeroth bin is handled by the epsilon check above. + return u32(log_lum * 62.0 + 1.0); +} + +// Read the metering mask at the given UV coordinates, returning a weight for the histogram. +// +// Since the histogram is summed in the compute_average step, there is a limit to the amount of +// distinct values that can be represented. When using the chosen value of 16, the maximum +// amount of pixels that can be weighted and summed is 2^32 / 16 = 16384^2. +fn metering_weight(coords: vec2) -> u32 { + let pos = vec2(coords * vec2(textureDimensions(tex_mask))); + let mask = textureLoad(tex_mask, pos, 0).r; + return u32(mask * 16.0); +} + +@compute @workgroup_size(16, 16, 1) +fn compute_histogram( + @builtin(global_invocation_id) global_invocation_id: vec3, + @builtin(local_invocation_index) local_invocation_index: u32 +) { + // Clear the workgroup shared histogram + if local_invocation_index < 64 { + histogram_shared[local_invocation_index] = 0u; + } + + // Wait for all workgroup threads to clear the shared histogram + workgroupBarrier(); + + let dim = vec2(textureDimensions(tex_color)); + let uv = vec2(global_invocation_id.xy) / vec2(dim); + + if global_invocation_id.x < dim.x && global_invocation_id.y < dim.y { + let col = textureLoad(tex_color, vec2(global_invocation_id.xy), 0).rgb; + let index = color_to_bin(col); + let weight = metering_weight(uv); + + // Increment the shared histogram bin by the weight obtained from the metering mask + atomicAdd(&histogram_shared[index], weight); + } + + // Wait for all workgroup threads to finish updating the workgroup histogram + workgroupBarrier(); + + // Accumulate the workgroup histogram into the global histogram. + // Note that the global histogram was not cleared at the beginning, + // as it will be cleared in compute_average. + atomicAdd(&histogram[local_invocation_index], histogram_shared[local_invocation_index]); +} + +@compute @workgroup_size(1, 1, 1) +fn compute_average(@builtin(local_invocation_index) local_index: u32) { + var histogram_sum = 0u; + + // Calculate the cumulative histogram and clear the histogram bins. 
+ // Each bin in the cumulative histogram contains the sum of all bins up to that point. + // This way we can quickly exclude the portion of lowest and highest samples as required by + // the low_percent and high_percent settings. + for (var i=0u; i<64u; i+=1u) { + histogram_sum += histogram[i]; + histogram_shared[i] = histogram_sum; + + // Clear the histogram bin for the next frame + histogram[i] = 0u; + } + + let first_index = u32(f32(histogram_sum) * settings.low_percent); + let last_index = u32(f32(histogram_sum) * settings.high_percent); + + var count = 0u; + var sum = 0.0; + for (var i=1u; i<64u; i+=1u) { + // The number of pixels in the bin. The histogram values are clamped to + // first_index and last_index to exclude the lowest and highest samples. + let bin_count = + clamp(histogram_shared[i], first_index, last_index) - + clamp(histogram_shared[i - 1u], first_index, last_index); + + sum += f32(bin_count) * f32(i); + count += bin_count; + } + + var avg_lum = settings.min_log_lum; + + if count > 0u { + // The average luminance of the included histogram samples. + avg_lum = sum / (f32(count) * 63.0) + * settings.log_lum_range + + settings.min_log_lum; + } + + // The position in the compensation curve texture to sample for avg_lum. + let u = (avg_lum - compensation_curve.min_log_lum) * compensation_curve.inv_log_lum_range; + + // The target exposure is the negative of the average log luminance. + // The compensation value is added to the target exposure to adjust the exposure for + // artistic purposes. + let target_exposure = textureLoad(tex_compensation, i32(saturate(u) * 255.0), 0).r + * compensation_curve.compensation_range + + compensation_curve.min_compensation + - avg_lum; + + // Smoothly adjust the `exposure` towards the `target_exposure` + let delta = target_exposure - exposure; + if target_exposure > exposure { + let speed_down = settings.speed_down * globals.delta_time; + let exp_down = speed_down / settings.exponential_transition_distance; + exposure = exposure + min(speed_down, delta * exp_down); + } else { + let speed_up = settings.speed_up * globals.delta_time; + let exp_up = speed_up / settings.exponential_transition_distance; + exposure = exposure + max(-speed_up, delta * exp_up); + } + + // Apply the exposure to the color grading settings, from where it will be used for the color + // grading pass. 
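+    // (The smoothed value is added on top of whatever exposure is already
+    // configured in the view's color grading, so user-specified exposure is
+    // preserved.)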
+    view.color_grading.exposure += exposure;
+}
+
+```
+
+### bevy/crates/bevy_core_pipeline/src/skybox/skybox_prepass
+
+```rust
+#import bevy_render::view::View
+#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput
+#import bevy_pbr::view_transformations::uv_to_ndc
+
+struct PreviousViewUniforms {
+    view_from_world: mat4x4<f32>,
+    clip_from_world: mat4x4<f32>,
+}
+
+@group(0) @binding(0) var<uniform> view: View;
+@group(0) @binding(1) var<uniform> previous_view: PreviousViewUniforms;
+
+@fragment
+fn fragment(in: FullscreenVertexOutput) -> @location(1) vec4<f32> {
+    let clip_pos = uv_to_ndc(in.uv); // Convert from uv to clip space
+    let world_pos = view.world_from_clip * vec4(clip_pos, 0.0, 1.0);
+    let prev_clip_pos = (previous_view.clip_from_world * world_pos).xy;
+    let velocity = (clip_pos - prev_clip_pos) * vec2(0.5, -0.5); // Copied from mesh motion vectors
+
+    return vec4(velocity.x, velocity.y, 0.0, 1.0);
+}
+
+```
+
+### bevy/crates/bevy_core_pipeline/src/skybox/skybox
+
+```rust
+#import bevy_render::view::View
+#import bevy_pbr::utils::coords_to_viewport_uv
+
+struct SkyboxUniforms {
+    brightness: f32,
+#ifdef SIXTEEN_BYTE_ALIGNMENT
+    _wasm_padding_8b: u32,
+    _wasm_padding_12b: u32,
+    _wasm_padding_16b: u32,
+#endif
+}
+
+@group(0) @binding(0) var skybox: texture_cube<f32>;
+@group(0) @binding(1) var skybox_sampler: sampler;
+@group(0) @binding(2) var<uniform> view: View;
+@group(0) @binding(3) var<uniform> uniforms: SkyboxUniforms;
+
+fn coords_to_ray_direction(position: vec2<f32>, viewport: vec4<f32>) -> vec3<f32> {
+    // Using world positions of the fragment and camera to calculate a ray direction
+    // breaks down at large translations. This code only needs to know the ray direction.
+    // The ray direction is along the direction from the camera to the fragment position.
+    // In view space, the camera is at the origin, so the view space ray direction is
+    // along the direction of the fragment position - (0,0,0) which is just the
+    // fragment position.
+    // Use the position on the near clipping plane to avoid -inf world position
+    // because the far plane of an infinite reverse projection is at infinity.
+    let view_position_homogeneous = view.view_from_clip * vec4(
+        coords_to_viewport_uv(position, viewport) * vec2(2.0, -2.0) + vec2(-1.0, 1.0),
+        1.0,
+        1.0,
+    );
+    let view_ray_direction = view_position_homogeneous.xyz / view_position_homogeneous.w;
+    // Transforming the view space ray direction by the view matrix transforms the
+    // direction to world space. Note that the w element is set to 0.0, as this is a
+    // vector direction, not a position. That causes the matrix multiplication to ignore
+    // the translations from the view matrix.
+    let ray_direction = (view.world_from_view * vec4(view_ray_direction, 0.0)).xyz;
+
+    return normalize(ray_direction);
+}
+
+struct VertexOutput {
+    @builtin(position) position: vec4<f32>,
+};
+
+//  3 |  2.
+//  2 |  :  `.
+//  1 |  x-----x.
+//  0 |  |  s  |  `.
+// -1 |  0-----x.....1
+//    +---------------
+//      -1  0  1  2  3
+//
+// The axes are clip-space x and y. The region marked s is the visible region.
+// The digits in the corners of the right-angled triangle are the vertex
+// indices.
+@vertex
+fn skybox_vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
+    // See the explanation above for how this works.
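+    // For reference, evaluating the expression below at the three vertex
+    // indices gives clip positions (-1, -1), (3, -1) and (-1, 3) for vertices
+    // 0, 1 and 2 respectively, with z = 0.0 and w = 1.0, matching the diagram.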
+ let clip_position = vec4( + f32(vertex_index & 1u), + f32((vertex_index >> 1u) & 1u), + 0.25, + 0.5 + ) * 4.0 - vec4(1.0); + + return VertexOutput(clip_position); +} + +@fragment +fn skybox_fragment(in: VertexOutput) -> @location(0) vec4 { + let ray_direction = coords_to_ray_direction(in.position.xy, view.viewport); + + // Cube maps are left-handed so we negate the z coordinate. + let out = textureSample(skybox, skybox_sampler, ray_direction * vec3(1.0, 1.0, -1.0)); + return vec4(out.rgb * uniforms.brightness, out.a); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/taa/taa + +```rust +// References: +// https://www.elopezr.com/temporal-aa-and-the-quest-for-the-holy-trail +// http://behindthepixels.io/assets/files/TemporalAA.pdf +// http://leiy.cc/publications/TAA/TAA_EG2020_Talk.pdf +// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING + +// Controls how much to blend between the current and past samples +// Lower numbers = less of the current sample and more of the past sample = more smoothing +// Values chosen empirically +const DEFAULT_HISTORY_BLEND_RATE: f32 = 0.1; // Default blend rate to use when no confidence in history +const MIN_HISTORY_BLEND_RATE: f32 = 0.015; // Minimum blend rate allowed, to ensure at least some of the current sample is used + +@group(0) @binding(0) var view_target: texture_2d; +@group(0) @binding(1) var history: texture_2d; +@group(0) @binding(2) var motion_vectors: texture_2d; +@group(0) @binding(3) var depth: texture_depth_2d; +@group(0) @binding(4) var nearest_sampler: sampler; +@group(0) @binding(5) var linear_sampler: sampler; + +struct Output { + @location(0) view_target: vec4, + @location(1) history: vec4, +}; + +// TAA is ideally applied after tonemapping, but before post processing +// Post processing wants to go before tonemapping, which conflicts +// Solution: Put TAA before tonemapping, tonemap TAA input, apply TAA, invert-tonemap TAA output +// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 20 +// https://gpuopen.com/learn/optimized-reversible-tonemapper-for-resolve +fn rcp(x: f32) -> f32 { return 1.0 / x; } +fn max3(x: vec3) -> f32 { return max(x.r, max(x.g, x.b)); } +fn tonemap(color: vec3) -> vec3 { return color * rcp(max3(color) + 1.0); } +fn reverse_tonemap(color: vec3) -> vec3 { return color * rcp(1.0 - max3(color)); } + +// The following 3 functions are from Playdead (MIT-licensed) +// https://github.com/playdeadgames/temporal/blob/master/Assets/Shaders/TemporalReprojection.shader +fn RGB_to_YCoCg(rgb: vec3) -> vec3 { + let y = (rgb.r / 4.0) + (rgb.g / 2.0) + (rgb.b / 4.0); + let co = (rgb.r / 2.0) - (rgb.b / 2.0); + let cg = (-rgb.r / 4.0) + (rgb.g / 2.0) - (rgb.b / 4.0); + return vec3(y, co, cg); +} + +fn YCoCg_to_RGB(ycocg: vec3) -> vec3 { + let r = ycocg.x + ycocg.y - ycocg.z; + let g = ycocg.x + ycocg.z; + let b = ycocg.x - ycocg.y - ycocg.z; + return saturate(vec3(r, g, b)); +} + +fn clip_towards_aabb_center(history_color: vec3, current_color: vec3, aabb_min: vec3, aabb_max: vec3) -> vec3 { + let p_clip = 0.5 * (aabb_max + aabb_min); + let e_clip = 0.5 * (aabb_max - aabb_min) + 0.00000001; + let v_clip = history_color - p_clip; + let v_unit = v_clip / e_clip; + let a_unit = abs(v_unit); + let ma_unit = max3(a_unit); + if ma_unit > 1.0 { + return p_clip + (v_clip / ma_unit); + } else { + return history_color; + } +} + +fn sample_history(u: f32, v: f32) -> vec3 { + return textureSample(history, linear_sampler, vec2(u, v)).rgb; +} + +fn 
sample_view_target(uv: vec2) -> vec3 { + var sample = textureSample(view_target, nearest_sampler, uv).rgb; +#ifdef TONEMAP + sample = tonemap(sample); +#endif + return RGB_to_YCoCg(sample); +} + +@fragment +fn taa(@location(0) uv: vec2) -> Output { + let texture_size = vec2(textureDimensions(view_target)); + let texel_size = 1.0 / texture_size; + + // Fetch the current sample + let original_color = textureSample(view_target, nearest_sampler, uv); + var current_color = original_color.rgb; +#ifdef TONEMAP + current_color = tonemap(current_color); +#endif + +#ifndef RESET + // Pick the closest motion_vector from 5 samples (reduces aliasing on the edges of moving entities) + // https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 27 + let offset = texel_size * 2.0; + let d_uv_tl = uv + vec2(-offset.x, offset.y); + let d_uv_tr = uv + vec2(offset.x, offset.y); + let d_uv_bl = uv + vec2(-offset.x, -offset.y); + let d_uv_br = uv + vec2(offset.x, -offset.y); + var closest_uv = uv; + let d_tl = textureSample(depth, nearest_sampler, d_uv_tl); + let d_tr = textureSample(depth, nearest_sampler, d_uv_tr); + var closest_depth = textureSample(depth, nearest_sampler, uv); + let d_bl = textureSample(depth, nearest_sampler, d_uv_bl); + let d_br = textureSample(depth, nearest_sampler, d_uv_br); + if d_tl > closest_depth { + closest_uv = d_uv_tl; + closest_depth = d_tl; + } + if d_tr > closest_depth { + closest_uv = d_uv_tr; + closest_depth = d_tr; + } + if d_bl > closest_depth { + closest_uv = d_uv_bl; + closest_depth = d_bl; + } + if d_br > closest_depth { + closest_uv = d_uv_br; + } + let closest_motion_vector = textureSample(motion_vectors, nearest_sampler, closest_uv).rg; + + // Reproject to find the equivalent sample from the past + // Uses 5-sample Catmull-Rom filtering (reduces blurriness) + // Catmull-Rom filtering: https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1 + // Ignoring corners: https://www.activision.com/cdn/research/Dynamic_Temporal_Antialiasing_and_Upsampling_in_Call_of_Duty_v4.pdf#page=68 + // Technically we should renormalize the weights since we're skipping the corners, but it's basically the same result + let history_uv = uv - closest_motion_vector; + let sample_position = history_uv * texture_size; + let texel_center = floor(sample_position - 0.5) + 0.5; + let f = sample_position - texel_center; + let w0 = f * (-0.5 + f * (1.0 - 0.5 * f)); + let w1 = 1.0 + f * f * (-2.5 + 1.5 * f); + let w2 = f * (0.5 + f * (2.0 - 1.5 * f)); + let w3 = f * f * (-0.5 + 0.5 * f); + let w12 = w1 + w2; + let texel_position_0 = (texel_center - 1.0) * texel_size; + let texel_position_3 = (texel_center + 2.0) * texel_size; + let texel_position_12 = (texel_center + (w2 / w12)) * texel_size; + var history_color = sample_history(texel_position_12.x, texel_position_0.y) * w12.x * w0.y; + history_color += sample_history(texel_position_0.x, texel_position_12.y) * w0.x * w12.y; + history_color += sample_history(texel_position_12.x, texel_position_12.y) * w12.x * w12.y; + history_color += sample_history(texel_position_3.x, texel_position_12.y) * w3.x * w12.y; + history_color += sample_history(texel_position_12.x, texel_position_3.y) * w12.x * w3.y; + + // Constrain past sample with 3x3 YCoCg variance clipping (reduces ghosting) + // YCoCg: https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 33 + // Variance clipping: 
https://developer.download.nvidia.com/gameworks/events/GDC2016/msalvi_temporal_supersampling.pdf + let s_tl = sample_view_target(uv + vec2(-texel_size.x, texel_size.y)); + let s_tm = sample_view_target(uv + vec2( 0.0, texel_size.y)); + let s_tr = sample_view_target(uv + vec2( texel_size.x, texel_size.y)); + let s_ml = sample_view_target(uv + vec2(-texel_size.x, 0.0)); + let s_mm = RGB_to_YCoCg(current_color); + let s_mr = sample_view_target(uv + vec2( texel_size.x, 0.0)); + let s_bl = sample_view_target(uv + vec2(-texel_size.x, -texel_size.y)); + let s_bm = sample_view_target(uv + vec2( 0.0, -texel_size.y)); + let s_br = sample_view_target(uv + vec2( texel_size.x, -texel_size.y)); + let moment_1 = s_tl + s_tm + s_tr + s_ml + s_mm + s_mr + s_bl + s_bm + s_br; + let moment_2 = (s_tl * s_tl) + (s_tm * s_tm) + (s_tr * s_tr) + (s_ml * s_ml) + (s_mm * s_mm) + (s_mr * s_mr) + (s_bl * s_bl) + (s_bm * s_bm) + (s_br * s_br); + let mean = moment_1 / 9.0; + let variance = (moment_2 / 9.0) - (mean * mean); + let std_deviation = sqrt(max(variance, vec3(0.0))); + history_color = RGB_to_YCoCg(history_color); + history_color = clip_towards_aabb_center(history_color, s_mm, mean - std_deviation, mean + std_deviation); + history_color = YCoCg_to_RGB(history_color); + + // How confident we are that the history is representative of the current frame + var history_confidence = textureSample(history, nearest_sampler, uv).a; + let pixel_motion_vector = abs(closest_motion_vector) * texture_size; + if pixel_motion_vector.x < 0.01 && pixel_motion_vector.y < 0.01 { + // Increment when pixels are not moving + history_confidence += 10.0; + } else { + // Else reset + history_confidence = 1.0; + } + + // Blend current and past sample + // Use more of the history if we're confident in it (reduces noise when there is no motion) + // https://hhoppe.com/supersample.pdf, section 4.1 + var current_color_factor = clamp(1.0 / history_confidence, MIN_HISTORY_BLEND_RATE, DEFAULT_HISTORY_BLEND_RATE); + + // Reject history when motion vectors point off screen + if any(saturate(history_uv) != history_uv) { + current_color_factor = 1.0; + history_confidence = 1.0; + } + + current_color = mix(history_color, current_color, current_color_factor); +#endif // #ifndef RESET + + + // Write output to history and view target + var out: Output; +#ifdef RESET + let history_confidence = 1.0 / MIN_HISTORY_BLEND_RATE; +#endif + out.history = vec4(current_color, history_confidence); +#ifdef TONEMAP + current_color = reverse_tonemap(current_color); +#endif + out.view_target = vec4(current_color, original_color.a); + return out; +} + +``` + +### bevy/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening + +```rust +// Copyright (c) 2022 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +struct CASUniforms { + sharpness: f32, +}; + +@group(0) @binding(0) var screenTexture: texture_2d; +@group(0) @binding(1) var samp: sampler; +@group(0) @binding(2) var uniforms: CASUniforms; + +// This is set at the limit of providing unnatural results for sharpening. +const FSR_RCAS_LIMIT = 0.1875; +// -4.0 instead of -1.0 to avoid issues with MSAA. +const peakC = vec2(10.0, -40.0); + +// Robust Contrast Adaptive Sharpening (RCAS) +// Based on the following implementation: +// https://github.com/GPUOpen-Effects/FidelityFX-FSR2/blob/ea97a113b0f9cadf519fbcff315cc539915a3acd/src/ffx-fsr2-api/shaders/ffx_fsr1.h#L672 +// RCAS is based on the following logic. +// RCAS uses a 5 tap filter in a cross pattern (same as CAS), +// W b +// W 1 W for taps d e f +// W h +// Where 'W' is the negative lobe weight. +// output = (W*(b+d+f+h)+e)/(4*W+1) +// RCAS solves for 'W' by seeing where the signal might clip out of the {0 to 1} input range, +// 0 == (W*(b+d+f+h)+e)/(4*W+1) -> W = -e/(b+d+f+h) +// 1 == (W*(b+d+f+h)+e)/(4*W+1) -> W = (1-e)/(b+d+f+h-4) +// Then chooses the 'W' which results in no clipping, limits 'W', and multiplies by the 'sharp' amount. +// This solution above has issues with MSAA input as the steps along the gradient cause edge detection issues. +// So RCAS uses 4x the maximum and 4x the minimum (depending on equation)in place of the individual taps. +// As well as switching from 'e' to either the minimum or maximum (depending on side), to help in energy conservation. +// This stabilizes RCAS. +// RCAS does a simple highpass which is normalized against the local contrast then shaped, +// 0.25 +// 0.25 -1 0.25 +// 0.25 +// This is used as a noise detection filter, to reduce the effect of RCAS on grain, and focus on real edges. +// The CAS node runs after tonemapping, so the input will be in the range of 0 to 1. +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Algorithm uses minimal 3x3 pixel neighborhood. + // b + // d e f + // h + let b = textureSample(screenTexture, samp, in.uv, vec2(0, -1)).rgb; + let d = textureSample(screenTexture, samp, in.uv, vec2(-1, 0)).rgb; + // We need the alpha value of the pixel we're working on for the output + let e = textureSample(screenTexture, samp, in.uv).rgbw; + let f = textureSample(screenTexture, samp, in.uv, vec2(1, 0)).rgb; + let h = textureSample(screenTexture, samp, in.uv, vec2(0, 1)).rgb; + // Min and max of ring. + let mn4 = min(min(b, d), min(f, h)); + let mx4 = max(max(b, d), max(f, h)); + // Limiters + // 4.0 to avoid issues with MSAA. + let hitMin = mn4 / (4.0 * mx4); + let hitMax = (peakC.x - mx4) / (peakC.y + 4.0 * mn4); + let lobeRGB = max(-hitMin, hitMax); + var lobe = max(-FSR_RCAS_LIMIT, min(0.0, max(lobeRGB.r, max(lobeRGB.g, lobeRGB.b)))) * uniforms.sharpness; +#ifdef RCAS_DENOISE + // Luma times 2. 
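+    // (The per-channel weights 0.5, 1.0, 0.5 sum to 2, hence "luma times 2";
+    // the common factor cancels because `noise` below is a ratio of these
+    // values.)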
+ let bL = b.b * 0.5 + (b.r * 0.5 + b.g); + let dL = d.b * 0.5 + (d.r * 0.5 + d.g); + let eL = e.b * 0.5 + (e.r * 0.5 + e.g); + let fL = f.b * 0.5 + (f.r * 0.5 + f.g); + let hL = h.b * 0.5 + (h.r * 0.5 + h.g); + // Noise detection. + var noise = 0.25 * bL + 0.25 * dL + 0.25 * fL + 0.25 * hL - eL;; + noise = saturate(abs(noise) / (max(max(bL, dL), max(fL, hL)) - min(min(bL, dL), min(fL, hL)))); + noise = 1.0 - 0.5 * noise; + // Apply noise removal. + lobe *= noise; +#endif + return vec4((lobe * b + lobe * d + lobe * f + lobe * h + e.rgb) / (4.0 * lobe + 1.0), e.w); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/blit/blit + +```rust +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var in_texture: texture_2d; +@group(0) @binding(1) var in_sampler: sampler; + +@fragment +fn fs_main(in: FullscreenVertexOutput) -> @location(0) vec4 { + return textureSample(in_texture, in_sampler, in.uv); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/dof/dof + +```rust +// Performs depth of field postprocessing, with both Gaussian and bokeh kernels. +// +// Gaussian blur is performed as a separable convolution: first blurring in the +// X direction, and then in the Y direction. This is asymptotically more +// efficient than performing a 2D convolution. +// +// The Bokeh blur uses a similar, but more complex, separable convolution +// technique. The algorithm is described in Colin Barré-Brisebois, "Hexagonal +// Bokeh Blur Revisited" [1]. It's motivated by the observation that we can use +// separable convolutions not only to produce boxes but to produce +// parallelograms. Thus, by performing three separable convolutions in sequence, +// we can produce a hexagonal shape. The first and second convolutions are done +// simultaneously using multiple render targets to cut the total number of +// passes down to two. +// +// [1]: https://colinbarrebrisebois.com/2017/04/18/hexagonal-bokeh-blur-revisited-part-2-improved-2-pass-version/ + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_pbr::mesh_view_bindings::view +#import bevy_pbr::view_transformations::depth_ndc_to_view_z +#import bevy_render::view::View + +// Parameters that control the depth of field effect. See +// `bevy_core_pipeline::dof::DepthOfFieldUniforms` for information on what these +// parameters mean. +struct DepthOfFieldParams { + /// The distance in meters to the location in focus. + focal_distance: f32, + + /// The [focal length]. Physically speaking, this represents "the distance + /// from the center of the lens to the principal foci of the lens". The + /// default value, 50 mm, is considered representative of human eyesight. + /// Real-world lenses range from anywhere from 5 mm for "fisheye" lenses to + /// 2000 mm for "super-telephoto" lenses designed for very distant objects. + /// + /// The higher the value, the more blurry objects not in focus will be. + /// + /// [focal length]: https://en.wikipedia.org/wiki/Focal_length + focal_length: f32, + + /// The premultiplied factor that we scale the circle of confusion by. + /// + /// This is calculated as `focal_length² / (sensor_height * aperture_f_stops)`. + coc_scale_factor: f32, + + /// The maximum diameter, in pixels, that we allow a circle of confusion to be. + /// + /// A circle of confusion essentially describes the size of a blur. + /// + /// This value is nonphysical but is useful for avoiding pathologically-slow + /// behavior. 
+ max_circle_of_confusion_diameter: f32, + + /// The depth value that we clamp distant objects to. See the comment in + /// [`DepthOfFieldSettings`] for more information. + max_depth: f32, + + /// Padding. + pad_a: u32, + /// Padding. + pad_b: u32, + /// Padding. + pad_c: u32, +} + +// The first bokeh pass outputs to two render targets. We declare them here. +struct DualOutput { + // The vertical output. + @location(0) output_0: vec4, + // The diagonal output. + @location(1) output_1: vec4, +} + +// @group(0) @binding(0) is `mesh_view_bindings::view`. + +// The depth texture for the main view. +#ifdef MULTISAMPLED +@group(0) @binding(1) var depth_texture: texture_depth_multisampled_2d; +#else // MULTISAMPLED +@group(0) @binding(1) var depth_texture: texture_depth_2d; +#endif // MULTISAMPLED + +// The main color texture. +@group(0) @binding(2) var color_texture_a: texture_2d; + +// The auxiliary color texture that we're sampling from. This is only used as +// part of the second bokeh pass. +#ifdef DUAL_INPUT +@group(0) @binding(3) var color_texture_b: texture_2d; +#endif // DUAL_INPUT + +// The global uniforms, representing data backed by buffers shared among all +// views in the scene. + +// The parameters that control the depth of field effect. +@group(1) @binding(0) var dof_params: DepthOfFieldParams; + +// The sampler that's used to fetch texels from the source color buffer. +@group(1) @binding(1) var color_texture_sampler: sampler; + +// cos(-30°), used for the bokeh blur. +const COS_NEG_FRAC_PI_6: f32 = 0.8660254037844387; +// sin(-30°), used for the bokeh blur. +const SIN_NEG_FRAC_PI_6: f32 = -0.5; +// cos(-150°), used for the bokeh blur. +const COS_NEG_FRAC_PI_5_6: f32 = -0.8660254037844387; +// sin(-150°), used for the bokeh blur. +const SIN_NEG_FRAC_PI_5_6: f32 = -0.5; + +// Calculates and returns the diameter (not the radius) of the [circle of +// confusion]. +// +// [circle of confusion]: https://en.wikipedia.org/wiki/Circle_of_confusion +fn calculate_circle_of_confusion(in_frag_coord: vec4) -> f32 { + // Unpack the depth of field parameters. + let focus = dof_params.focal_distance; + let f = dof_params.focal_length; + let scale = dof_params.coc_scale_factor; + let max_coc_diameter = dof_params.max_circle_of_confusion_diameter; + + // Sample the depth. + let frag_coord = vec2(floor(in_frag_coord.xy)); + let raw_depth = textureLoad(depth_texture, frag_coord, 0); + let depth = min(-depth_ndc_to_view_z(raw_depth), dof_params.max_depth); + + // Calculate the circle of confusion. + // + // This is just the formula from Wikipedia [1]. + // + // [1]: https://en.wikipedia.org/wiki/Circle_of_confusion#Determining_a_circle_of_confusion_diameter_from_the_object_field + let candidate_coc = scale * abs(depth - focus) / (depth * (focus - f)); + + let framebuffer_size = vec2(textureDimensions(color_texture_a)); + return clamp(candidate_coc * framebuffer_size.y, 0.0, max_coc_diameter); +} + +// Performs a single direction of the separable Gaussian blur kernel. +// +// * `frag_coord` is the screen-space pixel coordinate of the fragment (i.e. the +// `position` input to the fragment). +// +// * `coc` is the diameter (not the radius) of the circle of confusion for this +// fragment. +// +// * `frag_offset` is the vector, in screen-space units, from one sample to the +// next. For a horizontal blur this will be `vec2(1.0, 0.0)`; for a vertical +// blur this will be `vec2(0.0, 1.0)`. +// +// Returns the resulting color of the fragment. 
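+//
+// For example, a circle of confusion of 8 px gives σ = 2.0 and a support of
+// ceil(2.0 * 1.5) = 3 texels on each side of the center.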
+fn gaussian_blur(frag_coord: vec4, coc: f32, frag_offset: vec2) -> vec4 { + // Usually σ (the standard deviation) is half the radius, and the radius is + // half the CoC. So we multiply by 0.25. + let sigma = coc * 0.25; + + // 1.5σ is a good, somewhat aggressive default for support—the number of + // texels on each side of the center that we process. + let support = i32(ceil(sigma * 1.5)); + let uv = frag_coord.xy / vec2(textureDimensions(color_texture_a)); + let offset = frag_offset / vec2(textureDimensions(color_texture_a)); + + // The probability density function of the Gaussian blur is (up to constant factors) `exp(-1 / 2σ² * + // x²). We precalculate the constant factor here to avoid having to + // calculate it in the inner loop. + let exp_factor = -1.0 / (2.0 * sigma * sigma); + + // Accumulate samples on both sides of the current texel. Go two at a time, + // taking advantage of bilinear filtering. + var sum = textureSampleLevel(color_texture_a, color_texture_sampler, uv, 0.0).rgb; + var weight_sum = 1.0; + for (var i = 1; i <= support; i += 2) { + // This is a well-known trick to reduce the number of needed texture + // samples by a factor of two. We seek to accumulate two adjacent + // samples c₀ and c₁ with weights w₀ and w₁ respectively, with a single + // texture sample at a carefully chosen location. Observe that: + // + // k ⋅ lerp(c₀, c₁, t) = w₀⋅c₀ + w₁⋅c₁ + // + // w₁ + // if k = w₀ + w₁ and t = ─────── + // w₀ + w₁ + // + // Therefore, if we sample at a distance of t = w₁ / (w₀ + w₁) texels in + // between the two texel centers and scale by k = w₀ + w₁ afterward, we + // effectively evaluate w₀⋅c₀ + w₁⋅c₁ with a single texture lookup. + let w0 = exp(exp_factor * f32(i) * f32(i)); + let w1 = exp(exp_factor * f32(i + 1) * f32(i + 1)); + let uv_offset = offset * (f32(i) + w1 / (w0 + w1)); + let weight = w0 + w1; + + sum += ( + textureSampleLevel(color_texture_a, color_texture_sampler, uv + uv_offset, 0.0).rgb + + textureSampleLevel(color_texture_a, color_texture_sampler, uv - uv_offset, 0.0).rgb + ) * weight; + weight_sum += weight * 2.0; + } + + return vec4(sum / weight_sum, 1.0); +} + +// Performs a box blur in a single direction, sampling `color_texture_a`. +// +// * `frag_coord` is the screen-space pixel coordinate of the fragment (i.e. the +// `position` input to the fragment). +// +// * `coc` is the diameter (not the radius) of the circle of confusion for this +// fragment. +// +// * `frag_offset` is the vector, in screen-space units, from one sample to the +// next. This need not be horizontal or vertical. +fn box_blur_a(frag_coord: vec4, coc: f32, frag_offset: vec2) -> vec4 { + let support = i32(round(coc * 0.5)); + let uv = frag_coord.xy / vec2(textureDimensions(color_texture_a)); + let offset = frag_offset / vec2(textureDimensions(color_texture_a)); + + // Accumulate samples in a single direction. + var sum = vec3(0.0); + for (var i = 0; i <= support; i += 1) { + sum += textureSampleLevel( + color_texture_a, color_texture_sampler, uv + offset * f32(i), 0.0).rgb; + } + + return vec4(sum / vec3(1.0 + f32(support)), 1.0); +} + +// Performs a box blur in a single direction, sampling `color_texture_b`. +// +// * `frag_coord` is the screen-space pixel coordinate of the fragment (i.e. the +// `position` input to the fragment). +// +// * `coc` is the diameter (not the radius) of the circle of confusion for this +// fragment. +// +// * `frag_offset` is the vector, in screen-space units, from one sample to the +// next. This need not be horizontal or vertical. 
+#ifdef DUAL_INPUT +fn box_blur_b(frag_coord: vec4, coc: f32, frag_offset: vec2) -> vec4 { + let support = i32(round(coc * 0.5)); + let uv = frag_coord.xy / vec2(textureDimensions(color_texture_b)); + let offset = frag_offset / vec2(textureDimensions(color_texture_b)); + + // Accumulate samples in a single direction. + var sum = vec3(0.0); + for (var i = 0; i <= support; i += 1) { + sum += textureSampleLevel( + color_texture_b, color_texture_sampler, uv + offset * f32(i), 0.0).rgb; + } + + return vec4(sum / vec3(1.0 + f32(support)), 1.0); +} +#endif + +// Calculates the horizontal component of the separable Gaussian blur. +@fragment +fn gaussian_horizontal(in: FullscreenVertexOutput) -> @location(0) vec4 { + let coc = calculate_circle_of_confusion(in.position); + return gaussian_blur(in.position, coc, vec2(1.0, 0.0)); +} + +// Calculates the vertical component of the separable Gaussian blur. +@fragment +fn gaussian_vertical(in: FullscreenVertexOutput) -> @location(0) vec4 { + let coc = calculate_circle_of_confusion(in.position); + return gaussian_blur(in.position, coc, vec2(0.0, 1.0)); +} + +// Calculates the vertical and first diagonal components of the separable +// hexagonal bokeh blur. +// +// ╱ +// ╱ +// • +// │ +// │ +@fragment +fn bokeh_pass_0(in: FullscreenVertexOutput) -> DualOutput { + let coc = calculate_circle_of_confusion(in.position); + let vertical = box_blur_a(in.position, coc, vec2(0.0, 1.0)); + let diagonal = box_blur_a(in.position, coc, vec2(COS_NEG_FRAC_PI_6, SIN_NEG_FRAC_PI_6)); + + // Note that the diagonal part is pre-mixed with the vertical component. + var output: DualOutput; + output.output_0 = vertical; + output.output_1 = mix(vertical, diagonal, 0.5); + return output; +} + +// Calculates the second diagonal components of the separable hexagonal bokeh +// blur. +// +// ╲ ╱ +// ╲ ╱ +// • +#ifdef DUAL_INPUT +@fragment +fn bokeh_pass_1(in: FullscreenVertexOutput) -> @location(0) vec4 { + let coc = calculate_circle_of_confusion(in.position); + let output_0 = box_blur_a(in.position, coc, vec2(COS_NEG_FRAC_PI_6, SIN_NEG_FRAC_PI_6)); + let output_1 = box_blur_b(in.position, coc, vec2(COS_NEG_FRAC_PI_5_6, SIN_NEG_FRAC_PI_5_6)); + return mix(output_0, output_1, 0.5); +} +#endif + +``` + +### bevy/crates/bevy_core_pipeline/src/bloom/bloom + +```rust +// Bloom works by creating an intermediate texture with a bunch of mip levels, each half the size of the previous. +// You then downsample each mip (starting with the original texture) to the lower resolution mip under it, going in order. +// You then upsample each mip (starting from the smallest mip) and blend with the higher resolution mip above it (ending on the original texture). 
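+//
+// For example (illustrative numbers; the actual mip count is chosen by the
+// bloom node on the CPU side), a 1280x720 view is downsampled through
+// 640x360, 320x180, 160x90 and so on, and each upsample pass blends its
+// tent-filtered result back into the next larger mip.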
+// +// References: +// * [COD] - Next Generation Post Processing in Call of Duty - http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare +// * [PBB] - Physically Based Bloom - https://learnopengl.com/Guest-Articles/2022/Phys.-Based-Bloom + +struct BloomUniforms { + threshold_precomputations: vec4, + viewport: vec4, + aspect: f32, +}; + +@group(0) @binding(0) var input_texture: texture_2d; +@group(0) @binding(1) var s: sampler; + +@group(0) @binding(2) var uniforms: BloomUniforms; + +#ifdef FIRST_DOWNSAMPLE +// https://catlikecoding.com/unity/tutorials/advanced-rendering/bloom/#3.4 +fn soft_threshold(color: vec3) -> vec3 { + let brightness = max(color.r, max(color.g, color.b)); + var softness = brightness - uniforms.threshold_precomputations.y; + softness = clamp(softness, 0.0, uniforms.threshold_precomputations.z); + softness = softness * softness * uniforms.threshold_precomputations.w; + var contribution = max(brightness - uniforms.threshold_precomputations.x, softness); + contribution /= max(brightness, 0.00001); // Prevent division by 0 + return color * contribution; +} +#endif + +// luminance coefficients from Rec. 709. +// https://en.wikipedia.org/wiki/Rec._709 +fn tonemapping_luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} + +fn rgb_to_srgb_simple(color: vec3) -> vec3 { + return pow(color, vec3(1.0 / 2.2)); +} + +// http://graphicrants.blogspot.com/2013/12/tone-mapping.html +fn karis_average(color: vec3) -> f32 { + // Luminance calculated by gamma-correcting linear RGB to non-linear sRGB using pow(color, 1.0 / 2.2) + // and then calculating luminance based on Rec. 709 color primaries. + let luma = tonemapping_luminance(rgb_to_srgb_simple(color)) / 4.0; + return 1.0 / (1.0 + luma); +} + +// [COD] slide 153 +fn sample_input_13_tap(uv: vec2) -> vec3 { + let a = textureSample(input_texture, s, uv, vec2(-2, 2)).rgb; + let b = textureSample(input_texture, s, uv, vec2(0, 2)).rgb; + let c = textureSample(input_texture, s, uv, vec2(2, 2)).rgb; + let d = textureSample(input_texture, s, uv, vec2(-2, 0)).rgb; + let e = textureSample(input_texture, s, uv).rgb; + let f = textureSample(input_texture, s, uv, vec2(2, 0)).rgb; + let g = textureSample(input_texture, s, uv, vec2(-2, -2)).rgb; + let h = textureSample(input_texture, s, uv, vec2(0, -2)).rgb; + let i = textureSample(input_texture, s, uv, vec2(2, -2)).rgb; + let j = textureSample(input_texture, s, uv, vec2(-1, 1)).rgb; + let k = textureSample(input_texture, s, uv, vec2(1, 1)).rgb; + let l = textureSample(input_texture, s, uv, vec2(-1, -1)).rgb; + let m = textureSample(input_texture, s, uv, vec2(1, -1)).rgb; + +#ifdef FIRST_DOWNSAMPLE + // [COD] slide 168 + // + // The first downsample pass reads from the rendered frame which may exhibit + // 'fireflies' (individual very bright pixels) that should not cause the bloom effect. + // + // The first downsample uses a firefly-reduction method proposed by Brian Karis + // which takes a weighted-average of the samples to limit their luma range to [0, 1]. + // This implementation matches the LearnOpenGL article [PBB]. 
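+    //
+    // The five overlapping 2x2 groups below are each averaged (the `/ 4.0f`)
+    // and weighted so that the weights sum to 1: the four corner groups get
+    // 0.125 each and the inner (j, k, l, m) group gets 0.5, before the
+    // per-group Karis weight is applied.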
+ var group0 = (a + b + d + e) * (0.125f / 4.0f); + var group1 = (b + c + e + f) * (0.125f / 4.0f); + var group2 = (d + e + g + h) * (0.125f / 4.0f); + var group3 = (e + f + h + i) * (0.125f / 4.0f); + var group4 = (j + k + l + m) * (0.5f / 4.0f); + group0 *= karis_average(group0); + group1 *= karis_average(group1); + group2 *= karis_average(group2); + group3 *= karis_average(group3); + group4 *= karis_average(group4); + return group0 + group1 + group2 + group3 + group4; +#else + var sample = (a + c + g + i) * 0.03125; + sample += (b + d + f + h) * 0.0625; + sample += (e + j + k + l + m) * 0.125; + return sample; +#endif +} + +// [COD] slide 162 +fn sample_input_3x3_tent(uv: vec2) -> vec3 { + // Radius. Empirically chosen by and tweaked from the LearnOpenGL article. + let x = 0.004 / uniforms.aspect; + let y = 0.004; + + let a = textureSample(input_texture, s, vec2(uv.x - x, uv.y + y)).rgb; + let b = textureSample(input_texture, s, vec2(uv.x, uv.y + y)).rgb; + let c = textureSample(input_texture, s, vec2(uv.x + x, uv.y + y)).rgb; + + let d = textureSample(input_texture, s, vec2(uv.x - x, uv.y)).rgb; + let e = textureSample(input_texture, s, vec2(uv.x, uv.y)).rgb; + let f = textureSample(input_texture, s, vec2(uv.x + x, uv.y)).rgb; + + let g = textureSample(input_texture, s, vec2(uv.x - x, uv.y - y)).rgb; + let h = textureSample(input_texture, s, vec2(uv.x, uv.y - y)).rgb; + let i = textureSample(input_texture, s, vec2(uv.x + x, uv.y - y)).rgb; + + var sample = e * 0.25; + sample += (b + d + f + h) * 0.125; + sample += (a + c + g + i) * 0.0625; + + return sample; +} + +#ifdef FIRST_DOWNSAMPLE +@fragment +fn downsample_first(@location(0) output_uv: vec2) -> @location(0) vec4 { + let sample_uv = uniforms.viewport.xy + output_uv * uniforms.viewport.zw; + var sample = sample_input_13_tap(sample_uv); + // Lower bound of 0.0001 is to avoid propagating multiplying by 0.0 through the + // downscaling and upscaling which would result in black boxes. + // The upper bound is to prevent NaNs. + // with f32::MAX (E+38) Chrome fails with ":value 340282346999999984391321947108527833088.0 cannot be represented as 'f32'" + sample = clamp(sample, vec3(0.0001), vec3(3.40282347E+37)); + +#ifdef USE_THRESHOLD + sample = soft_threshold(sample); +#endif + + return vec4(sample, 1.0); +} +#endif + +@fragment +fn downsample(@location(0) uv: vec2) -> @location(0) vec4 { + return vec4(sample_input_13_tap(uv), 1.0); +} + +@fragment +fn upsample(@location(0) uv: vec2) -> @location(0) vec4 { + return vec4(sample_input_3x3_tent(uv), 1.0); +} + +``` + +### bevy/crates/bevy_core_pipeline/src/fxaa/fxaa + +```rust +// NVIDIA FXAA 3.11 +// Original source code by TIMOTHY LOTTES +// https://gist.github.com/kosua20/0c506b81b3812ac900048059d2383126 +// +// Cleaned version - https://github.com/kosua20/Rendu/blob/master/resources/common/shaders/screens/fxaa.frag +// +// Tweaks by mrDIMAS - https://github.com/FyroxEngine/Fyrox/blob/master/src/renderer/shaders/fxaa_fs.glsl + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var screenTexture: texture_2d; +@group(0) @binding(1) var samp: sampler; + +// Trims the algorithm from processing darks. 
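+// Exactly one of the EDGE_THRESH_MIN_* defs (and one EDGE_THRESH_* def below)
+// must be set when this shader is specialized; lower values let the pass
+// process darker, lower-contrast regions at extra cost.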
+#ifdef EDGE_THRESH_MIN_LOW + const EDGE_THRESHOLD_MIN: f32 = 0.0833; +#endif + +#ifdef EDGE_THRESH_MIN_MEDIUM + const EDGE_THRESHOLD_MIN: f32 = 0.0625; +#endif + +#ifdef EDGE_THRESH_MIN_HIGH + const EDGE_THRESHOLD_MIN: f32 = 0.0312; +#endif + +#ifdef EDGE_THRESH_MIN_ULTRA + const EDGE_THRESHOLD_MIN: f32 = 0.0156; +#endif + +#ifdef EDGE_THRESH_MIN_EXTREME + const EDGE_THRESHOLD_MIN: f32 = 0.0078; +#endif + +// The minimum amount of local contrast required to apply algorithm. +#ifdef EDGE_THRESH_LOW + const EDGE_THRESHOLD_MAX: f32 = 0.250; +#endif + +#ifdef EDGE_THRESH_MEDIUM + const EDGE_THRESHOLD_MAX: f32 = 0.166; +#endif + +#ifdef EDGE_THRESH_HIGH + const EDGE_THRESHOLD_MAX: f32 = 0.125; +#endif + +#ifdef EDGE_THRESH_ULTRA + const EDGE_THRESHOLD_MAX: f32 = 0.063; +#endif + +#ifdef EDGE_THRESH_EXTREME + const EDGE_THRESHOLD_MAX: f32 = 0.031; +#endif + +const ITERATIONS: i32 = 12; //default is 12 +const SUBPIXEL_QUALITY: f32 = 0.75; +// #define QUALITY(q) ((q) < 5 ? 1.0 : ((q) > 5 ? ((q) < 10 ? 2.0 : ((q) < 11 ? 4.0 : 8.0)) : 1.5)) +fn QUALITY(q: i32) -> f32 { + switch (q) { + //case 0, 1, 2, 3, 4: { return 1.0; } + default: { return 1.0; } + case 5: { return 1.5; } + case 6, 7, 8, 9: { return 2.0; } + case 10: { return 4.0; } + case 11: { return 8.0; } + } +} + +fn rgb2luma(rgb: vec3) -> f32 { + return sqrt(dot(rgb, vec3(0.299, 0.587, 0.114))); +} + +// Performs FXAA post-process anti-aliasing as described in the Nvidia FXAA white paper and the associated shader code. +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + let resolution = vec2(textureDimensions(screenTexture)); + let inverseScreenSize = 1.0 / resolution.xy; + let texCoord = in.position.xy * inverseScreenSize; + + let centerSample = textureSampleLevel(screenTexture, samp, texCoord, 0.0); + let colorCenter = centerSample.rgb; + + // Luma at the current fragment + let lumaCenter = rgb2luma(colorCenter); + + // Luma at the four direct neighbors of the current fragment. + let lumaDown = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(0, -1)).rgb); + let lumaUp = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(0, 1)).rgb); + let lumaLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, 0)).rgb); + let lumaRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, 0)).rgb); + + // Find the maximum and minimum luma around the current fragment. + let lumaMin = min(lumaCenter, min(min(lumaDown, lumaUp), min(lumaLeft, lumaRight))); + let lumaMax = max(lumaCenter, max(max(lumaDown, lumaUp), max(lumaLeft, lumaRight))); + + // Compute the delta. + let lumaRange = lumaMax - lumaMin; + + // If the luma variation is lower that a threshold (or if we are in a really dark area), we are not on an edge, don't perform any AA. + if (lumaRange < max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) { + return centerSample; + } + + // Query the 4 remaining corners lumas. + let lumaDownLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, -1)).rgb); + let lumaUpRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, 1)).rgb); + let lumaUpLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, 1)).rgb); + let lumaDownRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, -1)).rgb); + + // Combine the four edges lumas (using intermediary variables for future computations with the same values). 
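+    // (For example, `lumaDownUp` below is reused both in the `edgeHorizontal`
+    // estimate and in the 3x3 neighborhood average (`lumaAverage`) used later
+    // for sub-pixel blending.)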
+ let lumaDownUp = lumaDown + lumaUp; + let lumaLeftRight = lumaLeft + lumaRight; + + // Same for corners + let lumaLeftCorners = lumaDownLeft + lumaUpLeft; + let lumaDownCorners = lumaDownLeft + lumaDownRight; + let lumaRightCorners = lumaDownRight + lumaUpRight; + let lumaUpCorners = lumaUpRight + lumaUpLeft; + + // Compute an estimation of the gradient along the horizontal and vertical axis. + let edgeHorizontal = abs(-2.0 * lumaLeft + lumaLeftCorners) + + abs(-2.0 * lumaCenter + lumaDownUp) * 2.0 + + abs(-2.0 * lumaRight + lumaRightCorners); + + let edgeVertical = abs(-2.0 * lumaUp + lumaUpCorners) + + abs(-2.0 * lumaCenter + lumaLeftRight) * 2.0 + + abs(-2.0 * lumaDown + lumaDownCorners); + + // Is the local edge horizontal or vertical ? + let isHorizontal = (edgeHorizontal >= edgeVertical); + + // Choose the step size (one pixel) accordingly. + var stepLength = select(inverseScreenSize.x, inverseScreenSize.y, isHorizontal); + + // Select the two neighboring texels lumas in the opposite direction to the local edge. + var luma1 = select(lumaLeft, lumaDown, isHorizontal); + var luma2 = select(lumaRight, lumaUp, isHorizontal); + + // Compute gradients in this direction. + let gradient1 = luma1 - lumaCenter; + let gradient2 = luma2 - lumaCenter; + + // Which direction is the steepest ? + let is1Steepest = abs(gradient1) >= abs(gradient2); + + // Gradient in the corresponding direction, normalized. + let gradientScaled = 0.25 * max(abs(gradient1), abs(gradient2)); + + // Average luma in the correct direction. + var lumaLocalAverage = 0.0; + if (is1Steepest) { + // Switch the direction + stepLength = -stepLength; + lumaLocalAverage = 0.5 * (luma1 + lumaCenter); + } else { + lumaLocalAverage = 0.5 * (luma2 + lumaCenter); + } + + // Shift UV in the correct direction by half a pixel. + // Compute offset (for each iteration step) in the right direction. + var currentUv = texCoord; + var offset = vec2(0.0, 0.0); + if (isHorizontal) { + currentUv.y = currentUv.y + stepLength * 0.5; + offset.x = inverseScreenSize.x; + } else { + currentUv.x = currentUv.x + stepLength * 0.5; + offset.y = inverseScreenSize.y; + } + + // Compute UVs to explore on each side of the edge, orthogonally. The QUALITY allows us to step faster. + var uv1 = currentUv - offset; // * QUALITY(0); // (quality 0 is 1.0) + var uv2 = currentUv + offset; // * QUALITY(0); // (quality 0 is 1.0) + + // Read the lumas at both current extremities of the exploration segment, and compute the delta wrt to the local average luma. + var lumaEnd1 = rgb2luma(textureSampleLevel(screenTexture, samp, uv1, 0.0).rgb); + var lumaEnd2 = rgb2luma(textureSampleLevel(screenTexture, samp, uv2, 0.0).rgb); + lumaEnd1 = lumaEnd1 - lumaLocalAverage; + lumaEnd2 = lumaEnd2 - lumaLocalAverage; + + // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. + var reached1 = abs(lumaEnd1) >= gradientScaled; + var reached2 = abs(lumaEnd2) >= gradientScaled; + var reachedBoth = reached1 && reached2; + + // If the side is not reached, we continue to explore in this direction. + uv1 = select(uv1 - offset, uv1, reached1); // * QUALITY(1); // (quality 1 is 1.0) + uv2 = select(uv2 - offset, uv2, reached2); // * QUALITY(1); // (quality 1 is 1.0) + + // If both sides have not been reached, continue to explore. + if (!reachedBoth) { + for (var i: i32 = 2; i < ITERATIONS; i = i + 1) { + // If needed, read luma in 1st direction, compute delta. 
+ if (!reached1) { + lumaEnd1 = rgb2luma(textureSampleLevel(screenTexture, samp, uv1, 0.0).rgb); + lumaEnd1 = lumaEnd1 - lumaLocalAverage; + } + // If needed, read luma in opposite direction, compute delta. + if (!reached2) { + lumaEnd2 = rgb2luma(textureSampleLevel(screenTexture, samp, uv2, 0.0).rgb); + lumaEnd2 = lumaEnd2 - lumaLocalAverage; + } + // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. + reached1 = abs(lumaEnd1) >= gradientScaled; + reached2 = abs(lumaEnd2) >= gradientScaled; + reachedBoth = reached1 && reached2; + + // If the side is not reached, we continue to explore in this direction, with a variable quality. + if (!reached1) { + uv1 = uv1 - offset * QUALITY(i); + } + if (!reached2) { + uv2 = uv2 + offset * QUALITY(i); + } + + // If both sides have been reached, stop the exploration. + if (reachedBoth) { + break; + } + } + } + + // Compute the distances to each side edge of the edge (!). + var distance1 = select(texCoord.y - uv1.y, texCoord.x - uv1.x, isHorizontal); + var distance2 = select(uv2.y - texCoord.y, uv2.x - texCoord.x, isHorizontal); + + // In which direction is the side of the edge closer ? + let isDirection1 = distance1 < distance2; + let distanceFinal = min(distance1, distance2); + + // Thickness of the edge. + let edgeThickness = (distance1 + distance2); + + // Is the luma at center smaller than the local average ? + let isLumaCenterSmaller = lumaCenter < lumaLocalAverage; + + // If the luma at center is smaller than at its neighbor, the delta luma at each end should be positive (same variation). + let correctVariation1 = (lumaEnd1 < 0.0) != isLumaCenterSmaller; + let correctVariation2 = (lumaEnd2 < 0.0) != isLumaCenterSmaller; + + // Only keep the result in the direction of the closer side of the edge. + var correctVariation = select(correctVariation2, correctVariation1, isDirection1); + + // UV offset: read in the direction of the closest side of the edge. + let pixelOffset = - distanceFinal / edgeThickness + 0.5; + + // If the luma variation is incorrect, do not offset. + var finalOffset = select(0.0, pixelOffset, correctVariation); + + // Sub-pixel shifting + // Full weighted average of the luma over the 3x3 neighborhood. + let lumaAverage = (1.0 / 12.0) * (2.0 * (lumaDownUp + lumaLeftRight) + lumaLeftCorners + lumaRightCorners); + // Ratio of the delta between the global average and the center luma, over the luma range in the 3x3 neighborhood. + let subPixelOffset1 = clamp(abs(lumaAverage - lumaCenter) / lumaRange, 0.0, 1.0); + let subPixelOffset2 = (-2.0 * subPixelOffset1 + 3.0) * subPixelOffset1 * subPixelOffset1; + // Compute a sub-pixel offset based on this delta. + let subPixelOffsetFinal = subPixelOffset2 * subPixelOffset2 * SUBPIXEL_QUALITY; + + // Pick the biggest of the two offsets. + finalOffset = max(finalOffset, subPixelOffsetFinal); + + // Compute the final UV coordinates. + var finalUv = texCoord; + if (isHorizontal) { + finalUv.y = finalUv.y + finalOffset * stepLength; + } else { + finalUv.x = finalUv.x + finalOffset * stepLength; + } + + // Read the color at the new UV coordinates, and use it. 
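+    // (`finalOffset` is a fraction of a pixel; multiplying it by `stepLength`
+    // shifts the UV perpendicular to the detected edge, so the bilinear fetch
+    // below blends across that edge.)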
+ var finalColor = textureSampleLevel(screenTexture, samp, finalUv, 0.0).rgb; + return vec4(finalColor, centerSample.a); +} + +``` + +### bevy/crates/bevy_ui/src/render/ui + +```rust +#import bevy_render::view::View + +const TEXTURED = 1u; +const RIGHT_VERTEX = 2u; +const BOTTOM_VERTEX = 4u; +const BORDER: u32 = 8u; + +fn enabled(flags: u32, mask: u32) -> bool { + return (flags & mask) != 0u; +} + +@group(0) @binding(0) var view: View; + +struct VertexOutput { + @location(0) uv: vec2, + @location(1) color: vec4, + + @location(2) @interpolate(flat) size: vec2, + @location(3) @interpolate(flat) flags: u32, + @location(4) @interpolate(flat) radius: vec4, + @location(5) @interpolate(flat) border: vec4, + + // Position relative to the center of the rectangle. + @location(6) point: vec2, + @builtin(position) position: vec4, +}; + +@vertex +fn vertex( + @location(0) vertex_position: vec3, + @location(1) vertex_uv: vec2, + @location(2) vertex_color: vec4, + @location(3) flags: u32, + + // x: top left, y: top right, z: bottom right, w: bottom left. + @location(4) radius: vec4, + + // x: left, y: top, z: right, w: bottom. + @location(5) border: vec4, + @location(6) size: vec2, +) -> VertexOutput { + var out: VertexOutput; + out.uv = vertex_uv; + out.position = view.clip_from_world * vec4(vertex_position, 1.0); + out.color = vertex_color; + out.flags = flags; + out.radius = radius; + out.size = size; + out.border = border; + var point = 0.49999 * size; + if (flags & RIGHT_VERTEX) == 0u { + point.x *= -1.; + } + if (flags & BOTTOM_VERTEX) == 0u { + point.y *= -1.; + } + out.point = point; + + return out; +} + +@group(1) @binding(0) var sprite_texture: texture_2d; +@group(1) @binding(1) var sprite_sampler: sampler; + +// The returned value is the shortest distance from the given point to the boundary of the rounded +// box. +// +// Negative values indicate that the point is inside the rounded box, positive values that the point +// is outside, and zero is exactly on the boundary. +// +// Arguments: +// - `point` -> The function will return the distance from this point to the closest point on +// the boundary. +// - `size` -> The maximum width and height of the box. +// - `corner_radii` -> The radius of each rounded corner. Ordered counter clockwise starting +// top left: +// x: top left, y: top right, z: bottom right, w: bottom left. +fn sd_rounded_box(point: vec2, size: vec2, corner_radii: vec4) -> f32 { + // If 0.0 < y then select bottom left (w) and bottom right corner radius (z). + // Else select top left (x) and top right corner radius (y). + let rs = select(corner_radii.xy, corner_radii.wz, 0.0 < point.y); + // w and z are swapped above so that both pairs are in left to right order, otherwise this second + // select statement would return the incorrect value for the bottom pair. + let radius = select(rs.x, rs.y, 0.0 < point.x); + // Vector from the corner closest to the point, to the point. + let corner_to_point = abs(point) - 0.5 * size; + // Vector from the center of the radius circle to the point. + let q = corner_to_point + radius; + // Length from center of the radius circle to the point, zeros a component if the point is not + // within the quadrant of the radius circle that is part of the curved corner. 
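+    // (A worked example, not from the original source: for a 100x100 box with
+    // a 10px corner radius, a point exactly at the sharp box corner has
+    // `corner_to_point = (0, 0)` and `q = (10, 10)`, so the function returns
+    // length(q) - 10 ≈ 4.14, i.e. that point lies outside the rounded shape.)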
+ let l = length(max(q, vec2(0.0))); + let m = min(max(q.x, q.y), 0.0); + return l + m - radius; +} + +fn sd_inset_rounded_box(point: vec2, size: vec2, radius: vec4, inset: vec4) -> f32 { + let inner_size = size - inset.xy - inset.zw; + let inner_center = inset.xy + 0.5 * inner_size - 0.5 * size; + let inner_point = point - inner_center; + + var r = radius; + + // Top left corner. + r.x = r.x - max(inset.x, inset.y); + + // Top right corner. + r.y = r.y - max(inset.z, inset.y); + + // Bottom right corner. + r.z = r.z - max(inset.z, inset.w); + + // Bottom left corner. + r.w = r.w - max(inset.x, inset.w); + + let half_size = inner_size * 0.5; + let min_size = min(half_size.x, half_size.y); + + r = min(max(r, vec4(0.0)), vec4(min_size)); + + return sd_rounded_box(inner_point, inner_size, r); +} + +// get alpha for antialiasing for sdf +fn antialias(distance: f32) -> f32 { + // Using the fwidth(distance) was causing artifacts, so just use the distance. + // This antialiases between the distance values of 0.25 and -0.25 + return clamp(0.0, 1.0, 0.5 - 2.0 * distance); +} + +fn draw(in: VertexOutput, texture_color: vec4) -> vec4 { + // Only use the color sampled from the texture if the `TEXTURED` flag is enabled. + // This allows us to draw both textured and untextured shapes together in the same batch. + let color = select(in.color, in.color * texture_color, enabled(in.flags, TEXTURED)); + + // Signed distances. The magnitude is the distance of the point from the edge of the shape. + // * Negative values indicate that the point is inside the shape. + // * Zero values indicate the point is on the edge of the shape. + // * Positive values indicate the point is outside the shape. + + // Signed distance from the exterior boundary. + let external_distance = sd_rounded_box(in.point, in.size, in.radius); + + // Signed distance from the border's internal edge (the signed distance is negative if the point + // is inside the rect but not on the border). + // If the border size is set to zero, this is the same as the external distance. + let internal_distance = sd_inset_rounded_box(in.point, in.size, in.radius, in.border); + + // Signed distance from the border (the intersection of the rect with its border). + // Points inside the border have negative signed distance. Any point outside the border, whether + // outside the outside edge, or inside the inner edge have positive signed distance. + let border_distance = max(external_distance, -internal_distance); + + // At external edges with no border, `border_distance` is equal to zero. + // This select statement ensures we only perform anti-aliasing where a non-zero width border + // is present, otherwise an outline about the external boundary would be drawn even without + // a border. + let t = select(1.0 - step(0.0, border_distance), antialias(border_distance), external_distance < internal_distance); + + // Blend mode ALPHA_BLENDING is used for UI elements, so we don't premultiply alpha here. + return vec4(color.rgb, saturate(color.a * t)); +} + +fn draw_background(in: VertexOutput, texture_color: vec4) -> vec4 { + let color = select(in.color, in.color * texture_color, enabled(in.flags, TEXTURED)); + + // When drawing the background only draw the internal area and not the border. 
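+    // (With all border widths at zero, `sd_inset_rounded_box` reduces to the
+    // plain exterior distance, so the background is antialiased against the
+    // outer rounded boundary.)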
+ let internal_distance = sd_inset_rounded_box(in.point, in.size, in.radius, in.border); + let t = antialias(internal_distance); + return vec4(color.rgb, saturate(color.a * t)); +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let texture_color = textureSample(sprite_texture, sprite_sampler, in.uv); + + if enabled(in.flags, BORDER) { + return draw(in, texture_color); + } else { + return draw_background(in, texture_color); + } +} + +``` + +### bevy/crates/bevy_ui/src/render/ui_material + +```rust +#import bevy_render::{ + view::View, + globals::Globals, +} +#import bevy_ui::ui_vertex_output::UiVertexOutput + +@group(0) @binding(0) +var view: View; +@group(0) @binding(1) +var globals: Globals; + +@vertex +fn vertex( + @location(0) vertex_position: vec3, + @location(1) vertex_uv: vec2, + @location(2) size: vec2, + @location(3) border_widths: vec4, +) -> UiVertexOutput { + var out: UiVertexOutput; + out.uv = vertex_uv; + out.position = view.clip_from_world * vec4(vertex_position, 1.0); + out.size = size; + out.border_widths = border_widths; + return out; +} + +@fragment +fn fragment(in: UiVertexOutput) -> @location(0) vec4 { + return vec4(1.0); +} + +``` + +### bevy/crates/bevy_ui/src/render/ui_vertex_output + +```rust +#define_import_path bevy_ui::ui_vertex_output + +// The Vertex output of the default vertex shader for the Ui Material pipeline. +struct UiVertexOutput { + @location(0) uv: vec2, + // The size of the borders in UV space. Order is Left, Right, Top, Bottom. + @location(1) border_widths: vec4, + // The size of the node in pixels. Order is width, height. + @location(2) @interpolate(flat) size: vec2, + @builtin(position) position: vec4, +}; + +``` + +### bevy/crates/bevy_pbr/src/deferred/deferred_lighting + +```rust +#import bevy_pbr::{ + prepass_utils, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, + pbr_functions, + pbr_deferred_functions::pbr_input_from_deferred_gbuffer, + pbr_deferred_types::unpack_unorm3x4_plus_unorm_20_, + lighting, + mesh_view_bindings::deferred_prepass_texture, +} + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION +#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture +#import bevy_pbr::gtao_utils::gtao_multibounce +#endif + +struct FullscreenVertexOutput { + @builtin(position) + position: vec4, + @location(0) + uv: vec2, +}; + +struct PbrDeferredLightingDepthId { + depth_id: u32, // limited to u8 +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding_0: f32, + _webgl2_padding_1: f32, + _webgl2_padding_2: f32, +#endif +} +@group(1) @binding(0) +var depth_id: PbrDeferredLightingDepthId; + +@vertex +fn vertex(@builtin(vertex_index) vertex_index: u32) -> FullscreenVertexOutput { + // See the full screen vertex shader for explanation above for how this works. + let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; + // Depth is stored as unorm, so we are dividing the u8 depth_id by 255.0 here. 
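+    // (Illustration, assuming the lighting pipeline is configured with a
+    // depth-equal test against the copied pass-id depth texture: a material
+    // whose deferred lighting pass id is 1 draws this triangle at depth
+    // 1.0 / 255.0, so only fragments tagged with pass id 1 are shaded here.)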
+ let clip_position = vec4(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0), f32(depth_id.depth_id) / 255.0, 1.0); + + return FullscreenVertexOutput(clip_position, uv); +} + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + var frag_coord = vec4(in.position.xy, 0.0, 0.0); + + let deferred_data = textureLoad(deferred_prepass_texture, vec2(frag_coord.xy), 0); + +#ifdef WEBGL2 + frag_coord.z = unpack_unorm3x4_plus_unorm_20_(deferred_data.b).w; +#else +#ifdef DEPTH_PREPASS + frag_coord.z = prepass_utils::prepass_depth(in.position, 0u); +#endif +#endif + + var pbr_input = pbr_input_from_deferred_gbuffer(frag_coord, deferred_data); + var output_color = vec4(0.0); + + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if ((pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION + let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; + let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); + pbr_input.diffuse_occlusion = min(pbr_input.diffuse_occlusion, ssao_multibounce); + + // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" + let NdotV = max(dot(pbr_input.N, pbr_input.V), 0.0001); + var perceptual_roughness: f32 = pbr_input.material.perceptual_roughness; + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); + // Use SSAO to estimate the specular occlusion. + // Lagarde and Rousiers 2014, "Moving Frostbite to Physically Based Rendering" + pbr_input.specular_occlusion = saturate(pow(NdotV + ssao, exp2(-16.0 * roughness - 1.0)) - 1.0 + ssao); +#endif // SCREEN_SPACE_AMBIENT_OCCLUSION + + output_color = pbr_functions::apply_pbr_lighting(pbr_input); + } else { + output_color = pbr_input.material.base_color; + } + + output_color = pbr_functions::main_pass_post_lighting_processing(pbr_input, output_color); + + return output_color; +} + + +``` + +### bevy/crates/bevy_pbr/src/deferred/pbr_deferred_types + +```rust +#define_import_path bevy_pbr::pbr_deferred_types + +#import bevy_pbr::{ + mesh_types::MESH_FLAGS_SHADOW_RECEIVER_BIT, + pbr_types::{STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, +} + +// Maximum of 8 bits available +const DEFERRED_FLAGS_UNLIT_BIT: u32 = 1u; +const DEFERRED_FLAGS_FOG_ENABLED_BIT: u32 = 2u; +const DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 4u; + +fn deferred_flags_from_mesh_material_flags(mesh_flags: u32, mat_flags: u32) -> u32 { + var flags = 0u; + flags |= u32((mesh_flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) * DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT; + flags |= u32((mat_flags & STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) * DEFERRED_FLAGS_FOG_ENABLED_BIT; + flags |= u32((mat_flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) * DEFERRED_FLAGS_UNLIT_BIT; + return flags; +} + +fn mesh_material_flags_from_deferred_flags(deferred_flags: u32) -> vec2 { + var mat_flags = 0u; + var mesh_flags = 0u; + mesh_flags |= u32((deferred_flags & DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) * MESH_FLAGS_SHADOW_RECEIVER_BIT; + mat_flags |= u32((deferred_flags & DEFERRED_FLAGS_FOG_ENABLED_BIT) != 0u) * STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT; + mat_flags |= u32((deferred_flags & DEFERRED_FLAGS_UNLIT_BIT) != 0u) * STANDARD_MATERIAL_FLAGS_UNLIT_BIT; + return vec2(mesh_flags, mat_flags); +} + +const U12MAXF = 4095.0; +const U16MAXF = 65535.0; +const U20MAXF = 1048575.0; + +// Storing normals as oct24. 
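+// (Each octahedral component gets 12 bits, i.e. 4096 quantization steps per
+// axis; see U12MAXF above.)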
+// Flags are stored in the remaining 8 bits. +// https://jcgt.org/published/0003/02/01/paper.pdf +// Could possibly go down to oct20 if the space is needed. + +fn pack_24bit_normal_and_flags(octahedral_normal: vec2, flags: u32) -> u32 { + let unorm1 = u32(saturate(octahedral_normal.x) * U12MAXF + 0.5); + let unorm2 = u32(saturate(octahedral_normal.y) * U12MAXF + 0.5); + return (unorm1 & 0xFFFu) | ((unorm2 & 0xFFFu) << 12u) | ((flags & 0xFFu) << 24u); +} + +fn unpack_24bit_normal(packed: u32) -> vec2 { + let unorm1 = packed & 0xFFFu; + let unorm2 = (packed >> 12u) & 0xFFFu; + return vec2(f32(unorm1) / U12MAXF, f32(unorm2) / U12MAXF); +} + +fn unpack_flags(packed: u32) -> u32 { + return (packed >> 24u) & 0xFFu; +} + +// The builtin one didn't work in webgl. +// "'unpackUnorm4x8' : no matching overloaded function found" +// https://github.com/gfx-rs/naga/issues/2006 +fn unpack_unorm4x8_(v: u32) -> vec4 { + return vec4( + f32(v & 0xFFu), + f32((v >> 8u) & 0xFFu), + f32((v >> 16u) & 0xFFu), + f32((v >> 24u) & 0xFFu) + ) / 255.0; +} + +// 'packUnorm4x8' : no matching overloaded function found +// https://github.com/gfx-rs/naga/issues/2006 +fn pack_unorm4x8_(values: vec4) -> u32 { + let v = vec4(saturate(values) * 255.0 + 0.5); + return (v.w << 24u) | (v.z << 16u) | (v.y << 8u) | v.x; +} + +// Pack 3x 4bit unorm + 1x 20bit +fn pack_unorm3x4_plus_unorm_20_(v: vec4) -> u32 { + let sm = vec3(saturate(v.xyz) * 15.0 + 0.5); + let bg = u32(saturate(v.w) * U20MAXF + 0.5); + return (bg << 12u) | (sm.z << 8u) | (sm.y << 4u) | sm.x; +} + +// Unpack 3x 4bit unorm + 1x 20bit +fn unpack_unorm3x4_plus_unorm_20_(v: u32) -> vec4 { + return vec4( + f32(v & 0xfu) / 15.0, + f32((v >> 4u) & 0xFu) / 15.0, + f32((v >> 8u) & 0xFu) / 15.0, + f32((v >> 12u) & 0xFFFFFFu) / U20MAXF, + ); +} + +``` + +### bevy/crates/bevy_pbr/src/deferred/pbr_deferred_functions + +```rust +#define_import_path bevy_pbr::pbr_deferred_functions + +#import bevy_pbr::{ + pbr_types::{PbrInput, pbr_input_new, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, + pbr_deferred_types as deferred_types, + pbr_functions, + rgb9e5, + mesh_view_bindings::view, + utils::{octahedral_encode, octahedral_decode}, + prepass_io::FragmentOutput, + view_transformations::{position_ndc_to_world, frag_coord_to_ndc}, +} + +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::VertexOutput +#else +#import bevy_pbr::prepass_io::VertexOutput +#endif + +#ifdef MOTION_VECTOR_PREPASS + #import bevy_pbr::pbr_prepass_functions::calculate_motion_vector +#endif + +// Creates the deferred gbuffer from a PbrInput. +fn deferred_gbuffer_from_pbr_input(in: PbrInput) -> vec4 { + // Only monochrome occlusion supported. May not be worth including at all. + // Some models have baked occlusion, GLTF only supports monochrome. + // Real time occlusion is applied in the deferred lighting pass. + // Deriving luminance via Rec. 709. coefficients + // https://en.wikipedia.org/wiki/Rec._709 + let diffuse_occlusion = dot(in.diffuse_occlusion, vec3(0.2126, 0.7152, 0.0722)); +#ifdef WEBGL2 // More crunched for webgl so we can also fit depth. + var props = deferred_types::pack_unorm3x4_plus_unorm_20_(vec4( + in.material.reflectance, + in.material.metallic, + diffuse_occlusion, + in.frag_coord.z)); +#else + var props = deferred_types::pack_unorm4x8_(vec4( + in.material.reflectance, // could be fewer bits + in.material.metallic, // could be fewer bits + diffuse_occlusion, // is this worth including? 
+ 0.0)); // spare +#endif // WEBGL2 + let flags = deferred_types::deferred_flags_from_mesh_material_flags(in.flags, in.material.flags); + let octahedral_normal = octahedral_encode(normalize(in.N)); + var base_color_srgb = vec3(0.0); + var emissive = in.material.emissive.rgb; + if ((in.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { + // Material is unlit, use emissive component of gbuffer for color data. + // Unlit materials are effectively emissive. + emissive = in.material.base_color.rgb; + } else { + base_color_srgb = pow(in.material.base_color.rgb, vec3(1.0 / 2.2)); + } + let deferred = vec4( + deferred_types::pack_unorm4x8_(vec4(base_color_srgb, in.material.perceptual_roughness)), + rgb9e5::vec3_to_rgb9e5_(emissive), + props, + deferred_types::pack_24bit_normal_and_flags(octahedral_normal, flags), + ); + return deferred; +} + +// Creates a PbrInput from the deferred gbuffer. +fn pbr_input_from_deferred_gbuffer(frag_coord: vec4, gbuffer: vec4) -> PbrInput { + var pbr = pbr_input_new(); + + let flags = deferred_types::unpack_flags(gbuffer.a); + let deferred_flags = deferred_types::mesh_material_flags_from_deferred_flags(flags); + pbr.flags = deferred_flags.x; + pbr.material.flags = deferred_flags.y; + + let base_rough = deferred_types::unpack_unorm4x8_(gbuffer.r); + pbr.material.perceptual_roughness = base_rough.a; + let emissive = rgb9e5::rgb9e5_to_vec3_(gbuffer.g); + if ((pbr.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { + pbr.material.base_color = vec4(emissive, 1.0); + pbr.material.emissive = vec4(vec3(0.0), 1.0); + } else { + pbr.material.base_color = vec4(pow(base_rough.rgb, vec3(2.2)), 1.0); + pbr.material.emissive = vec4(emissive, 1.0); + } +#ifdef WEBGL2 // More crunched for webgl so we can also fit depth. + let props = deferred_types::unpack_unorm3x4_plus_unorm_20_(gbuffer.b); + // Bias to 0.5 since that's the value for almost all materials. 
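+    // (Reflectance only got 4 bits in the WebGL2 gbuffer: a stored value of
+    // 0.5 packs to 8 and unpacks to 8/15 ≈ 0.533, so subtracting roughly 1/30
+    // brings the common default of 0.5 back exactly.)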
+ pbr.material.reflectance = saturate(props.r - 0.03333333333); +#else + let props = deferred_types::unpack_unorm4x8_(gbuffer.b); + pbr.material.reflectance = props.r; +#endif // WEBGL2 + pbr.material.metallic = props.g; + pbr.diffuse_occlusion = vec3(props.b); + let octahedral_normal = deferred_types::unpack_24bit_normal(gbuffer.a); + let N = octahedral_decode(octahedral_normal); + + let world_position = vec4(position_ndc_to_world(frag_coord_to_ndc(frag_coord)), 1.0); + let is_orthographic = view.clip_from_view[3].w == 1.0; + let V = pbr_functions::calculate_view(world_position, is_orthographic); + + pbr.frag_coord = frag_coord; + pbr.world_normal = N; + pbr.world_position = world_position; + pbr.N = N; + pbr.V = V; + pbr.is_orthographic = is_orthographic; + + return pbr; +} + +#ifdef PREPASS_PIPELINE +fn deferred_output(in: VertexOutput, pbr_input: PbrInput) -> FragmentOutput { + var out: FragmentOutput; + + // gbuffer + out.deferred = deferred_gbuffer_from_pbr_input(pbr_input); + // lighting pass id (used to determine which lighting shader to run for the fragment) + out.deferred_lighting_pass_id = pbr_input.material.deferred_lighting_pass_id; + // normal if required +#ifdef NORMAL_PREPASS + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); +#endif + // motion vectors if required +#ifdef MOTION_VECTOR_PREPASS +#ifdef MESHLET_MESH_MATERIAL_PASS + out.motion_vector = in.motion_vector; +#else + out.motion_vector = calculate_motion_vector(in.world_position, in.previous_world_position); +#endif +#endif + + return out; +} +#endif + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_functions + +```rust +#define_import_path bevy_pbr::pbr_functions + +#import bevy_pbr::{ + pbr_types, + pbr_bindings, + mesh_view_bindings as view_bindings, + mesh_view_types, + lighting, + lighting::{LAYER_BASE, LAYER_CLEARCOAT}, + transmission, + clustered_forward as clustering, + shadows, + ambient, + irradiance_volume, + mesh_types::{MESH_FLAGS_SHADOW_RECEIVER_BIT, MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT}, +} +#import bevy_render::maths::{E, powsafe} + +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::VertexOutput +#else ifdef PREPASS_PIPELINE +#import bevy_pbr::prepass_io::VertexOutput +#else // PREPASS_PIPELINE +#import bevy_pbr::forward_io::VertexOutput +#endif // PREPASS_PIPELINE + +#ifdef ENVIRONMENT_MAP +#import bevy_pbr::environment_map +#endif + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping::{tone_mapping, screen_space_dither} +#endif + + +// Biasing info needed to sample from a texture when calling `sample_texture`. +// How this is done depends on whether we're rendering meshlets or regular +// meshes. +struct SampleBias { +#ifdef MESHLET_MESH_MATERIAL_PASS + ddx_uv: vec2, + ddy_uv: vec2, +#else // MESHLET_MESH_MATERIAL_PASS + mip_bias: f32, +#endif // MESHLET_MESH_MATERIAL_PASS +} + +// This is the standard 4x4 ordered dithering pattern from [1]. +// +// We can't use `array, 4>` because they can't be indexed dynamically +// due to Naga limitations. So instead we pack into a single `vec4` and extract +// individual bytes. +// +// [1]: https://en.wikipedia.org/wiki/Ordered_dithering#Threshold_map +const DITHER_THRESHOLD_MAP: vec4 = vec4( + 0x0a020800, + 0x060e040c, + 0x09010b03, + 0x050d070f +); + +// Processes a visibility range dither value and discards the fragment if +// needed. +// +// Visibility ranges, also known as HLODs, are crossfades between different +// levels of detail. +// +// The `dither` value ranges from [-16, 16]. 
When zooming out, positive values +// are used for meshes that are in the process of disappearing, while negative +// values are used for meshes that are in the process of appearing. In other +// words, when the camera is moving backwards, the `dither` value counts up from +// -16 to 0 when the object is fading in, stays at 0 while the object is +// visible, and then counts up to 16 while the object is fading out. +// Distinguishing between negative and positive values allows the dither +// patterns for different LOD levels of a single mesh to mesh together properly. +#ifdef VISIBILITY_RANGE_DITHER +fn visibility_range_dither(frag_coord: vec4, dither: i32) { + // If `dither` is 0, the object is visible. + if (dither == 0) { + return; + } + + // If `dither` is less than -15 or greater than 15, the object is culled. + if (dither <= -16 || dither >= 16) { + discard; + } + + // Otherwise, check the dither pattern. + let coords = vec2(floor(frag_coord.xy)) % 4u; + let threshold = i32((DITHER_THRESHOLD_MAP[coords.y] >> (coords.x * 8)) & 0xff); + if ((dither >= 0 && dither + threshold >= 16) || (dither < 0 && 1 + dither + threshold <= 0)) { + discard; + } +} +#endif + +fn alpha_discard(material: pbr_types::StandardMaterial, output_color: vec4) -> vec4 { + var color = output_color; + let alpha_mode = material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE { + // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0 + color.a = 1.0; + } + +#ifdef MAY_DISCARD + // NOTE: `MAY_DISCARD` is only defined in the alpha to coverage case if MSAA + // was off. This special situation causes alpha to coverage to fall back to + // alpha mask. + else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK || + alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE { + if color.a >= material.alpha_cutoff { + // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque + color.a = 1.0; + } else { + // NOTE: output_color.a < in.material.alpha_cutoff should not be rendered + discard; + } + } +#endif + + return color; +} + +// Samples a texture using the appropriate biasing metric for the type of mesh +// in use (mesh vs. meshlet). +fn sample_texture( + texture: texture_2d, + samp: sampler, + uv: vec2, + bias: SampleBias, +) -> vec4 { +#ifdef MESHLET_MESH_MATERIAL_PASS + return textureSampleGrad(texture, samp, uv, bias.ddx_uv, bias.ddy_uv); +#else + return textureSampleBias(texture, samp, uv, bias.mip_bias); +#endif +} + +fn prepare_world_normal( + world_normal: vec3, + double_sided: bool, + is_front: bool, +) -> vec3 { + var output: vec3 = world_normal; +#ifndef VERTEX_TANGENTS +#ifndef STANDARD_MATERIAL_NORMAL_MAP + // NOTE: When NOT using normal-mapping, if looking at the back face of a double-sided + // material, the normal needs to be inverted. This is a branchless version of that. + output = (f32(!double_sided || is_front) * 2.0 - 1.0) * output; +#endif +#endif + return output; +} + +// Calculates the three TBN vectors according to [mikktspace]. Returns a matrix +// with T, B, N columns in that order. +// +// [mikktspace]: http://www.mikktspace.com/ +fn calculate_tbn_mikktspace(world_normal: vec3, world_tangent: vec4) -> mat3x3 { + // NOTE: The mikktspace method of normal mapping explicitly requires that the world normal NOT + // be re-normalized in the fragment shader. 
This is primarily to match the way mikktspace + // bakes vertex tangents and normal maps so that this is the exact inverse. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ + var N: vec3 = world_normal; + + // NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be + // normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the + // vertex tangent! Do not change this code unless you really know what you are doing. + // http://www.mikktspace.com/ + var T: vec3 = world_tangent.xyz; + var B: vec3 = world_tangent.w * cross(N, T); + + return mat3x3(T, B, N); +} + +fn apply_normal_mapping( + standard_material_flags: u32, + TBN: mat3x3, + double_sided: bool, + is_front: bool, + in_Nt: vec3, +) -> vec3 { + // Unpack the TBN vectors. + var T = TBN[0]; + var B = TBN[1]; + var N = TBN[2]; + + // Nt is the tangent-space normal. + var Nt = in_Nt; + if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u { + // Only use the xy components and derive z for 2-component normal maps. + Nt = vec3(Nt.rg * 2.0 - 1.0, 0.0); + Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y); + } else { + Nt = Nt * 2.0 - 1.0; + } + // Normal maps authored for DirectX require flipping the y component + if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u { + Nt.y = -Nt.y; + } + + if double_sided && !is_front { + Nt = -Nt; + } + + // NOTE: The mikktspace method of normal mapping applies maps the tangent-space normal from + // the normal map texture in this way to be an EXACT inverse of how the normal map baker + // calculates the normal maps so there is no error introduced. Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ + N = Nt.x * T + Nt.y * B + Nt.z * N; + + return normalize(N); +} + +#ifdef STANDARD_MATERIAL_ANISOTROPY + +// Modifies the normal to achieve a better approximate direction from the +// environment map when using anisotropy. +// +// This follows the suggested implementation in the `KHR_materials_anisotropy` specification: +// https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md#image-based-lighting +fn bend_normal_for_anisotropy(lighting_input: ptr) { + // Unpack. + let N = (*lighting_input).layers[LAYER_BASE].N; + let roughness = (*lighting_input).layers[LAYER_BASE].roughness; + let V = (*lighting_input).V; + let anisotropy = (*lighting_input).anisotropy; + let Ba = (*lighting_input).Ba; + + var bent_normal = normalize(cross(cross(Ba, V), Ba)); + + // The `KHR_materials_anisotropy` spec states: + // + // > This heuristic can probably be improved upon + let a = pow(2.0, pow(2.0, 1.0 - anisotropy * (1.0 - roughness))); + bent_normal = normalize(mix(bent_normal, N, a)); + + // The `KHR_materials_anisotropy` spec states: + // + // > Mixing the reflection with the normal is more accurate both with and + // > without anisotropy and keeps rough objects from gathering light from + // > behind their tangent plane. 
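+    // (With roughness == 0 this leaves R as the pure mirror reflection about
+    // the bent normal; as roughness grows, R is pulled toward the bent normal
+    // itself.)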
+ let R = normalize(mix(reflect(-V, bent_normal), bent_normal, roughness * roughness)); + + (*lighting_input).layers[LAYER_BASE].N = bent_normal; + (*lighting_input).layers[LAYER_BASE].R = R; +} + +#endif // STANDARD_MATERIAL_ANISTROPY + +// NOTE: Correctly calculates the view vector depending on whether +// the projection is orthographic or perspective. +fn calculate_view( + world_position: vec4, + is_orthographic: bool, +) -> vec3 { + var V: vec3; + if is_orthographic { + // Orthographic view vector + V = normalize(vec3(view_bindings::view.clip_from_world[0].z, view_bindings::view.clip_from_world[1].z, view_bindings::view.clip_from_world[2].z)); + } else { + // Only valid for a perspective projection + V = normalize(view_bindings::view.world_position.xyz - world_position.xyz); + } + return V; +} + +// Diffuse strength is inversely related to metallicity, specular and diffuse transmission +fn calculate_diffuse_color( + base_color: vec3, + metallic: f32, + specular_transmission: f32, + diffuse_transmission: f32 +) -> vec3 { + return base_color * (1.0 - metallic) * (1.0 - specular_transmission) * + (1.0 - diffuse_transmission); +} + +// Remapping [0,1] reflectance to F0 +// See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping +fn calculate_F0(base_color: vec3, metallic: f32, reflectance: f32) -> vec3 { + return 0.16 * reflectance * reflectance * (1.0 - metallic) + base_color * metallic; +} + +#ifndef PREPASS_FRAGMENT +fn apply_pbr_lighting( + in: pbr_types::PbrInput, +) -> vec4 { + var output_color: vec4 = in.material.base_color; + + let emissive = in.material.emissive; + + // calculate non-linear roughness from linear perceptualRoughness + let metallic = in.material.metallic; + let perceptual_roughness = in.material.perceptual_roughness; + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); + let ior = in.material.ior; + let thickness = in.material.thickness; + let reflectance = in.material.reflectance; + let diffuse_transmission = in.material.diffuse_transmission; + let specular_transmission = in.material.specular_transmission; + + let specular_transmissive_color = specular_transmission * in.material.base_color.rgb; + + let diffuse_occlusion = in.diffuse_occlusion; + let specular_occlusion = in.specular_occlusion; + + // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" + let NdotV = max(dot(in.N, in.V), 0.0001); + let R = reflect(-in.V, in.N); + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Do the above calculations again for the clearcoat layer. Remember that + // the clearcoat can have its own roughness and its own normal. 
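+    // (Only the normal-dependent quantities are recomputed below:
+    // clearcoat_NdotV, clearcoat_R and the clearcoat roughness; the view
+    // vector V and the world position are shared with the base layer.)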
+ let clearcoat = in.material.clearcoat; + let clearcoat_perceptual_roughness = in.material.clearcoat_perceptual_roughness; + let clearcoat_roughness = lighting::perceptualRoughnessToRoughness(clearcoat_perceptual_roughness); + let clearcoat_N = in.clearcoat_N; + let clearcoat_NdotV = max(dot(clearcoat_N, in.V), 0.0001); + let clearcoat_R = reflect(-in.V, clearcoat_N); +#endif // STANDARD_MATERIAL_CLEARCOAT + + let diffuse_color = calculate_diffuse_color( + output_color.rgb, + metallic, + specular_transmission, + diffuse_transmission + ); + + // Diffuse transmissive strength is inversely related to metallicity and specular transmission, but directly related to diffuse transmission + let diffuse_transmissive_color = output_color.rgb * (1.0 - metallic) * (1.0 - specular_transmission) * diffuse_transmission; + + // Calculate the world position of the second Lambertian lobe used for diffuse transmission, by subtracting material thickness + let diffuse_transmissive_lobe_world_position = in.world_position - vec4(in.world_normal, 0.0) * thickness; + + let F0 = calculate_F0(output_color.rgb, metallic, reflectance); + let F_ab = lighting::F_AB(perceptual_roughness, NdotV); + + var direct_light: vec3 = vec3(0.0); + + // Transmitted Light (Specular and Diffuse) + var transmitted_light: vec3 = vec3(0.0); + + // Pack all the values into a structure. + var lighting_input: lighting::LightingInput; + lighting_input.layers[LAYER_BASE].NdotV = NdotV; + lighting_input.layers[LAYER_BASE].N = in.N; + lighting_input.layers[LAYER_BASE].R = R; + lighting_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness; + lighting_input.layers[LAYER_BASE].roughness = roughness; + lighting_input.P = in.world_position.xyz; + lighting_input.V = in.V; + lighting_input.diffuse_color = diffuse_color; + lighting_input.F0_ = F0; + lighting_input.F_ab = F_ab; +#ifdef STANDARD_MATERIAL_CLEARCOAT + lighting_input.layers[LAYER_CLEARCOAT].NdotV = clearcoat_NdotV; + lighting_input.layers[LAYER_CLEARCOAT].N = clearcoat_N; + lighting_input.layers[LAYER_CLEARCOAT].R = clearcoat_R; + lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = clearcoat_perceptual_roughness; + lighting_input.layers[LAYER_CLEARCOAT].roughness = clearcoat_roughness; + lighting_input.clearcoat_strength = clearcoat; +#endif // STANDARD_MATERIAL_CLEARCOAT +#ifdef STANDARD_MATERIAL_ANISOTROPY + lighting_input.anisotropy = in.anisotropy_strength; + lighting_input.Ta = in.anisotropy_T; + lighting_input.Ba = in.anisotropy_B; +#endif // STANDARD_MATERIAL_ANISOTROPY + + // And do the same for transmissive if we need to. 
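+    // (The transmissive lighting input below reuses the same LightingInput
+    // layout, but with N and V negated, roughness fixed at 1.0, and P moved to
+    // the second Lambertian lobe position computed above.)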
+#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + var transmissive_lighting_input: lighting::LightingInput; + transmissive_lighting_input.layers[LAYER_BASE].NdotV = 1.0; + transmissive_lighting_input.layers[LAYER_BASE].N = -in.N; + transmissive_lighting_input.layers[LAYER_BASE].R = vec3(0.0); + transmissive_lighting_input.layers[LAYER_BASE].perceptual_roughness = 1.0; + transmissive_lighting_input.layers[LAYER_BASE].roughness = 1.0; + transmissive_lighting_input.P = diffuse_transmissive_lobe_world_position.xyz; + transmissive_lighting_input.V = -in.V; + transmissive_lighting_input.diffuse_color = diffuse_transmissive_color; + transmissive_lighting_input.F0_ = vec3(0.0); + transmissive_lighting_input.F_ab = vec2(0.1); +#ifdef STANDARD_MATERIAL_CLEARCOAT + transmissive_lighting_input.layers[LAYER_CLEARCOAT].NdotV = 0.0; + transmissive_lighting_input.layers[LAYER_CLEARCOAT].N = vec3(0.0); + transmissive_lighting_input.layers[LAYER_CLEARCOAT].R = vec3(0.0); + transmissive_lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = 0.0; + transmissive_lighting_input.layers[LAYER_CLEARCOAT].roughness = 0.0; + transmissive_lighting_input.clearcoat_strength = 0.0; +#endif // STANDARD_MATERIAL_CLEARCOAT +#ifdef STANDARD_MATERIAL_ANISOTROPY + lighting_input.anisotropy = in.anisotropy_strength; + lighting_input.Ta = in.anisotropy_T; + lighting_input.Ba = in.anisotropy_B; +#endif // STANDARD_MATERIAL_ANISOTROPY +#endif // STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + + let view_z = dot(vec4( + view_bindings::view.view_from_world[0].z, + view_bindings::view.view_from_world[1].z, + view_bindings::view.view_from_world[2].z, + view_bindings::view.view_from_world[3].z + ), in.world_position); + let cluster_index = clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic); + let offset_and_counts = clustering::unpack_offset_and_counts(cluster_index); + + // Point lights (direct) + for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) { + let light_id = clustering::get_clusterable_object_id(i); + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_point_shadow(light_id, in.world_position, in.world_normal); + } + + let light_contrib = lighting::point_light(light_id, &lighting_input); + direct_light += light_contrib * shadow; + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // roughness = 1.0; + // NdotV = 1.0; + // R = vec3(0.0) // doesn't really matter + // F_ab = vec2(0.1) + // F0 = vec3(0.0) + var transmitted_shadow: f32 = 1.0; + if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + transmitted_shadow = shadows::fetch_point_shadow(light_id, diffuse_transmissive_lobe_world_position, -in.world_normal); + } + + let transmitted_light_contrib = + lighting::point_light(light_id, &transmissive_lighting_input); + transmitted_light += transmitted_light_contrib * 
transmitted_shadow; +#endif + } + + // Spot lights (direct) + for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) { + let light_id = clustering::get_clusterable_object_id(i); + + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_spot_shadow(light_id, in.world_position, in.world_normal); + } + + let light_contrib = lighting::spot_light(light_id, &lighting_input); + direct_light += light_contrib * shadow; + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // roughness = 1.0; + // NdotV = 1.0; + // R = vec3(0.0) // doesn't really matter + // F_ab = vec2(0.1) + // F0 = vec3(0.0) + var transmitted_shadow: f32 = 1.0; + if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + transmitted_shadow = shadows::fetch_spot_shadow(light_id, diffuse_transmissive_lobe_world_position, -in.world_normal); + } + + let transmitted_light_contrib = + lighting::spot_light(light_id, &transmissive_lighting_input); + transmitted_light += transmitted_light_contrib * transmitted_shadow; +#endif + } + + // directional lights (direct) + let n_directional_lights = view_bindings::lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + // check if this light should be skipped, which occurs if this light does not intersect with the view + // note point and spot lights aren't skippable, as the relevant lights are filtered in `assign_lights_to_clusters` + let light = &view_bindings::lights.directional_lights[i]; + if (*light).skip != 0u { + continue; + } + + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::lights.directional_lights[i].flags & mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_directional_shadow(i, in.world_position, in.world_normal, view_z); + } + + var light_contrib = lighting::directional_light(i, &lighting_input); + +#ifdef DIRECTIONAL_LIGHT_SHADOW_MAP_DEBUG_CASCADES + light_contrib = shadows::cascade_debug_visualization(light_contrib, i, view_z); +#endif + direct_light += light_contrib * shadow; + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // roughness = 1.0; + // NdotV = 1.0; + // R = vec3(0.0) // doesn't really matter + // F_ab = vec2(0.1) + // F0 = vec3(0.0) + var transmitted_shadow: f32 = 1.0; + if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) + && (view_bindings::lights.directional_lights[i].flags & 
mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + transmitted_shadow = shadows::fetch_directional_shadow(i, diffuse_transmissive_lobe_world_position, -in.world_normal, view_z); + } + + let transmitted_light_contrib = + lighting::directional_light(i, &transmissive_lighting_input); + transmitted_light += transmitted_light_contrib * transmitted_shadow; +#endif + } + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // perceptual_roughness = 1.0; + // NdotV = 1.0; + // F0 = vec3(0.0) + // diffuse_occlusion = vec3(1.0) + transmitted_light += ambient::ambient_light(diffuse_transmissive_lobe_world_position, -in.N, -in.V, 1.0, diffuse_transmissive_color, vec3(0.0), 1.0, vec3(1.0)); +#endif + + // Diffuse indirect lighting can come from a variety of sources. The + // priority goes like this: + // + // 1. Lightmap (highest) + // 2. Irradiance volume + // 3. Environment map (lowest) + // + // When we find a source of diffuse indirect lighting, we stop accumulating + // any more diffuse indirect light. This avoids double-counting if, for + // example, both lightmaps and irradiance volumes are present. + + var indirect_light = vec3(0.0f); + +#ifdef LIGHTMAP + if (all(indirect_light == vec3(0.0f))) { + indirect_light += in.lightmap_light * diffuse_color; + } +#endif + +#ifdef IRRADIANCE_VOLUME { + // Irradiance volume light (indirect) + if (all(indirect_light == vec3(0.0f))) { + let irradiance_volume_light = irradiance_volume::irradiance_volume_light( + in.world_position.xyz, in.N); + indirect_light += irradiance_volume_light * diffuse_color * diffuse_occlusion; + } +#endif + + // Environment map light (indirect) +#ifdef ENVIRONMENT_MAP + +#ifdef STANDARD_MATERIAL_ANISOTROPY + var bent_normal_lighting_input = lighting_input; + bend_normal_for_anisotropy(&bent_normal_lighting_input); + let environment_map_lighting_input = &bent_normal_lighting_input; +#else // STANDARD_MATERIAL_ANISOTROPY + let environment_map_lighting_input = &lighting_input; +#endif // STANDARD_MATERIAL_ANISOTROPY + + let environment_light = environment_map::environment_map_light( + environment_map_lighting_input, + any(indirect_light != vec3(0.0f)) + ); + + // If screen space reflections are going to be used for this material, don't + // accumulate environment map light yet. The SSR shader will do it. 
+#ifdef SCREEN_SPACE_REFLECTIONS + let use_ssr = perceptual_roughness <= + view_bindings::ssr_settings.perceptual_roughness_threshold; +#else // SCREEN_SPACE_REFLECTIONS + let use_ssr = false; +#endif // SCREEN_SPACE_REFLECTIONS + + if (!use_ssr) { + let environment_light = environment_map::environment_map_light( + &lighting_input, + any(indirect_light != vec3(0.0f)) + ); + + indirect_light += environment_light.diffuse * diffuse_occlusion + + environment_light.specular * specular_occlusion; + } + +#endif // ENVIRONMENT_MAP + + // Ambient light (indirect) + indirect_light += ambient::ambient_light(in.world_position, in.N, in.V, NdotV, diffuse_color, F0, perceptual_roughness, diffuse_occlusion); + + // we'll use the specular component of the transmitted environment + // light in the call to `specular_transmissive_light()` below + var specular_transmitted_environment_light = vec3(0.0); + +#ifdef ENVIRONMENT_MAP + +#ifdef STANDARD_MATERIAL_DIFFUSE_OR_SPECULAR_TRANSMISSION + // NOTE: We use the diffuse transmissive color, inverted normal and view vectors, + // and the following simplified values for the transmitted environment light contribution + // approximation: + // + // diffuse_color = vec3(1.0) // later we use `diffuse_transmissive_color` and `specular_transmissive_color` + // NdotV = 1.0; + // R = T // see definition below + // F0 = vec3(1.0) + // diffuse_occlusion = 1.0 + // + // (This one is slightly different from the other light types above, because the environment + // map light returns both diffuse and specular components separately, and we want to use both) + + let T = -normalize( + in.V + // start with view vector at entry point + refract(in.V, -in.N, 1.0 / ior) * thickness // add refracted vector scaled by thickness, towards exit point + ); // normalize to find exit point view vector + + var transmissive_environment_light_input: lighting::LightingInput; + transmissive_environment_light_input.diffuse_color = vec3(1.0); + transmissive_environment_light_input.layers[LAYER_BASE].NdotV = 1.0; + transmissive_environment_light_input.P = in.world_position.xyz; + transmissive_environment_light_input.layers[LAYER_BASE].N = -in.N; + transmissive_environment_light_input.V = in.V; + transmissive_environment_light_input.layers[LAYER_BASE].R = T; + transmissive_environment_light_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness; + transmissive_environment_light_input.layers[LAYER_BASE].roughness = roughness; + transmissive_environment_light_input.F0_ = vec3(1.0); + transmissive_environment_light_input.F_ab = vec2(0.1); +#ifdef STANDARD_MATERIAL_CLEARCOAT + // No clearcoat. 
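+    // (The clearcoat strength of this lighting input is set to zero below, so the
+    // clearcoat layer contributes nothing to the transmitted environment light.)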
+ transmissive_environment_light_input.clearcoat_strength = 0.0; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].NdotV = 0.0; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].N = in.N; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].R = vec3(0.0); + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].perceptual_roughness = 0.0; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].roughness = 0.0; +#endif // STANDARD_MATERIAL_CLEARCOAT + + let transmitted_environment_light = + environment_map::environment_map_light(&transmissive_environment_light_input, false); + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + transmitted_light += transmitted_environment_light.diffuse * diffuse_transmissive_color; +#endif // STANDARD_MATERIAL_DIFFUSE_TRANSMISSION +#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION + specular_transmitted_environment_light = transmitted_environment_light.specular * specular_transmissive_color; +#endif // STANDARD_MATERIAL_SPECULAR_TRANSMISSION + +#endif // STANDARD_MATERIAL_SPECULAR_OR_DIFFUSE_TRANSMISSION + +#endif // ENVIRONMENT_MAP + + var emissive_light = emissive.rgb * output_color.a; + + // "The clearcoat layer is on top of emission in the layering stack. + // Consequently, the emission is darkened by the Fresnel term." + // + // +#ifdef STANDARD_MATERIAL_CLEARCOAT + emissive_light = emissive_light * (0.04 + (1.0 - 0.04) * pow(1.0 - clearcoat_NdotV, 5.0)); +#endif + + emissive_light = emissive_light * mix(1.0, view_bindings::view.exposure, emissive.a); + +#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION + transmitted_light += transmission::specular_transmissive_light(in.world_position, in.frag_coord.xyz, view_z, in.N, in.V, F0, ior, thickness, perceptual_roughness, specular_transmissive_color, specular_transmitted_environment_light).rgb; + + if (in.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ATTENUATION_ENABLED_BIT) != 0u { + // We reuse the `atmospheric_fog()` function here, as it's fundamentally + // equivalent to the attenuation that takes place inside the material volume, + // and will allow us to eventually hook up subsurface scattering more easily + var attenuation_fog: mesh_view_types::Fog; + attenuation_fog.base_color.a = 1.0; + attenuation_fog.be = pow(1.0 - in.material.attenuation_color.rgb, vec3(E)) / in.material.attenuation_distance; + // TODO: Add the subsurface scattering factor below + // attenuation_fog.bi = /* ... */ + transmitted_light = bevy_pbr::fog::atmospheric_fog( + attenuation_fog, vec4(transmitted_light, 1.0), thickness, + vec3(0.0) // TODO: Pass in (pre-attenuated) scattered light contribution here + ).rgb; + } +#endif + + // Total light + output_color = vec4( + (view_bindings::view.exposure * (transmitted_light + direct_light + indirect_light)) + emissive_light, + output_color.a + ); + + output_color = clustering::cluster_debug_visualization( + output_color, + view_z, + in.is_orthographic, + offset_and_counts, + cluster_index, + ); + + return output_color; +} +#endif // PREPASS_FRAGMENT + +fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_world_position: vec3, view_world_position: vec3) -> vec4 { + let view_to_world = fragment_world_position.xyz - view_world_position.xyz; + + // `length()` is used here instead of just `view_to_world.z` since that produces more + // high quality results, especially for denser/smaller fogs. 
we get a "curved" + // fog shape that remains consistent with camera rotation, instead of a "linear" + // fog shape that looks a bit fake + let distance = length(view_to_world); + + var scattering = vec3(0.0); + if fog_params.directional_light_color.a > 0.0 { + let view_to_world_normalized = view_to_world / distance; + let n_directional_lights = view_bindings::lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + let light = view_bindings::lights.directional_lights[i]; + scattering += pow( + max( + dot(view_to_world_normalized, light.direction_to_light), + 0.0 + ), + fog_params.directional_light_exponent + ) * light.color.rgb * view_bindings::view.exposure; + } + } + + if fog_params.mode == mesh_view_types::FOG_MODE_LINEAR { + return bevy_pbr::fog::linear_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL { + return bevy_pbr::fog::exponential_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL_SQUARED { + return bevy_pbr::fog::exponential_squared_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_ATMOSPHERIC { + return bevy_pbr::fog::atmospheric_fog(fog_params, input_color, distance, scattering); + } else { + return input_color; + } +} + +#ifdef PREMULTIPLY_ALPHA +fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4 { +// `Blend`, `Premultiplied` and `Alpha` all share the same `BlendState`. Depending +// on the alpha mode, we premultiply the color channels by the alpha channel value, +// (and also optionally replace the alpha value with 0.0) so that the result produces +// the desired blend mode when sent to the blending operation. +#ifdef BLEND_PREMULTIPLIED_ALPHA + // For `BlendState::PREMULTIPLIED_ALPHA_BLENDING` the blend function is: + // + // result = 1 * src_color + (1 - src_alpha) * dst_color + let alpha_mode = standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD { + // Here, we premultiply `src_color` by `src_alpha`, and replace `src_alpha` with 0.0: + // + // src_color *= src_alpha + // src_alpha = 0.0 + // + // We end up with: + // + // result = 1 * (src_alpha * src_color) + (1 - 0) * dst_color + // result = src_alpha * src_color + 1 * dst_color + // + // Which is the blend operation for additive blending + return vec4(color.rgb * color.a, 0.0); + } else { + // Here, we don't do anything, so that we get premultiplied alpha blending. 
(As expected) + return color.rgba; + } +#endif +// `Multiply` uses its own `BlendState`, but we still need to premultiply here in the +// shader so that we get correct results as we tweak the alpha channel +#ifdef BLEND_MULTIPLY + // The blend function is: + // + // result = dst_color * src_color + (1 - src_alpha) * dst_color + // + // We premultiply `src_color` by `src_alpha`: + // + // src_color *= src_alpha + // + // We end up with: + // + // result = dst_color * (src_color * src_alpha) + (1 - src_alpha) * dst_color + // result = src_alpha * (src_color * dst_color) + (1 - src_alpha) * dst_color + // + // Which is the blend operation for multiplicative blending with arbitrary mixing + // controlled by the source alpha channel + return vec4(color.rgb * color.a, color.a); +#endif +} +#endif + +// fog, alpha premultiply +// for non-hdr cameras, tonemapping and debanding +fn main_pass_post_lighting_processing( + pbr_input: pbr_types::PbrInput, + input_color: vec4, +) -> vec4 { + var output_color = input_color; + + // fog + if (view_bindings::fog.mode != mesh_view_types::FOG_MODE_OFF && (pbr_input.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { + output_color = apply_fog(view_bindings::fog, output_color, pbr_input.world_position.xyz, view_bindings::view.world_position.xyz); + } + +#ifdef TONEMAP_IN_SHADER + output_color = tone_mapping(output_color, view_bindings::view.color_grading); +#ifdef DEBAND_DITHER + var output_rgb = output_color.rgb; + output_rgb = powsafe(output_rgb, 1.0 / 2.2); + output_rgb += screen_space_dither(pbr_input.frag_coord.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. + output_rgb = powsafe(output_rgb, 2.2); + output_color = vec4(output_rgb, output_color.a); +#endif +#endif +#ifdef PREMULTIPLY_ALPHA + output_color = premultiply_alpha(pbr_input.material.flags, output_color); +#endif + return output_color; +} + +``` + +### bevy/crates/bevy_pbr/src/render/morph + +```rust +#define_import_path bevy_pbr::morph + +#ifdef MORPH_TARGETS + +#import bevy_pbr::mesh_types::MorphWeights; + +@group(1) @binding(2) var morph_weights: MorphWeights; +@group(1) @binding(3) var morph_targets: texture_3d; +@group(1) @binding(7) var prev_morph_weights: MorphWeights; + +// NOTE: Those are the "hardcoded" values found in `MorphAttributes` struct +// in crates/bevy_render/src/mesh/morph/visitors.rs +// In an ideal world, the offsets are established dynamically and passed as #defines +// to the shader, but it's out of scope for the initial implementation of morph targets. 
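+// As a worked example of the addressing below: for vertex 100, the first normal
+// component uses component_offset = normal_offset = 3u, so
+// component_index = 9u * 100u + 3u = 903u. With an (illustrative) morph texture
+// width of 256 texels, that component is read from texel (903 % 256, 903 / 256) =
+// (135, 3) of the layer corresponding to the sampled morph target.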
+const position_offset: u32 = 0u; +const normal_offset: u32 = 3u; +const tangent_offset: u32 = 6u; +const total_component_count: u32 = 9u; + +fn layer_count() -> u32 { + let dimensions = textureDimensions(morph_targets); + return u32(dimensions.z); +} +fn component_texture_coord(vertex_index: u32, component_offset: u32) -> vec2 { + let width = u32(textureDimensions(morph_targets).x); + let component_index = total_component_count * vertex_index + component_offset; + return vec2(component_index % width, component_index / width); +} +fn weight_at(weight_index: u32) -> f32 { + let i = weight_index; + return morph_weights.weights[i / 4u][i % 4u]; +} +fn prev_weight_at(weight_index: u32) -> f32 { + let i = weight_index; + return prev_morph_weights.weights[i / 4u][i % 4u]; +} +fn morph_pixel(vertex: u32, component: u32, weight: u32) -> f32 { + let coord = component_texture_coord(vertex, component); + // Due to https://gpuweb.github.io/gpuweb/wgsl/#texel-formats + // While the texture stores a f32, the textureLoad returns a vec4<>, where + // only the first component is set. + return textureLoad(morph_targets, vec3(coord, weight), 0).r; +} +fn morph(vertex_index: u32, component_offset: u32, weight_index: u32) -> vec3 { + return vec3( + morph_pixel(vertex_index, component_offset, weight_index), + morph_pixel(vertex_index, component_offset + 1u, weight_index), + morph_pixel(vertex_index, component_offset + 2u, weight_index), + ); +} + +#endif // MORPH_TARGETS + +``` + +### bevy/crates/bevy_pbr/src/render/shadow_sampling + +```rust +#define_import_path bevy_pbr::shadow_sampling + +#import bevy_pbr::{ + mesh_view_bindings as view_bindings, + utils::interleaved_gradient_noise, + utils, +} +#import bevy_render::maths::{orthonormalize, PI} + +// Do the lookup, using HW 2x2 PCF and comparison +fn sample_shadow_map_hardware(light_local: vec2, depth: f32, array_index: i32) -> f32 { +#ifdef NO_ARRAY_TEXTURES_SUPPORT + return textureSampleCompare( + view_bindings::directional_shadow_textures, + view_bindings::directional_shadow_textures_sampler, + light_local, + depth, + ); +#else + return textureSampleCompareLevel( + view_bindings::directional_shadow_textures, + view_bindings::directional_shadow_textures_sampler, + light_local, + array_index, + depth, + ); +#endif +} + +// Numbers determined by trial and error that gave nice results. +const SPOT_SHADOW_TEXEL_SIZE: f32 = 0.0134277345; +const POINT_SHADOW_SCALE: f32 = 0.003; +const POINT_SHADOW_TEMPORAL_OFFSET_SCALE: f32 = 0.5; + +// These are the standard MSAA sample point positions from D3D. They were chosen +// to get a reasonable distribution that's not too regular. +// +// https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_standard_multisample_quality_levels?redirectedfrom=MSDN +const D3D_SAMPLE_POINT_POSITIONS: array, 8> = array( + vec2( 0.125, -0.375), + vec2(-0.125, 0.375), + vec2( 0.625, 0.125), + vec2(-0.375, -0.625), + vec2(-0.625, 0.625), + vec2(-0.875, -0.125), + vec2( 0.375, 0.875), + vec2( 0.875, -0.875), +); + +// And these are the coefficients corresponding to the probability distribution +// function of a 2D Gaussian lobe with zero mean and the identity covariance +// matrix at those points. 
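+// (Concretely, each coefficient below is exp(-|p|^2 / 2) evaluated at the matching
+// sample point and normalized so the eight weights sum to 1: for the first point
+// (0.125, -0.375), exp(-0.15625 / 2) ≈ 0.9249 and 0.9249 / 5.8865 ≈ 0.1571.)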
+const D3D_SAMPLE_POINT_COEFFS: array = array( + 0.157112, + 0.157112, + 0.138651, + 0.130251, + 0.114946, + 0.114946, + 0.107982, + 0.079001, +); + +// https://web.archive.org/web/20230210095515/http://the-witness.net/news/2013/09/shadow-mapping-summary-part-1 +fn sample_shadow_map_castano_thirteen(light_local: vec2, depth: f32, array_index: i32) -> f32 { + let shadow_map_size = vec2(textureDimensions(view_bindings::directional_shadow_textures)); + let inv_shadow_map_size = 1.0 / shadow_map_size; + + let uv = light_local * shadow_map_size; + var base_uv = floor(uv + 0.5); + let s = (uv.x + 0.5 - base_uv.x); + let t = (uv.y + 0.5 - base_uv.y); + base_uv -= 0.5; + base_uv *= inv_shadow_map_size; + + let uw0 = (4.0 - 3.0 * s); + let uw1 = 7.0; + let uw2 = (1.0 + 3.0 * s); + + let u0 = (3.0 - 2.0 * s) / uw0 - 2.0; + let u1 = (3.0 + s) / uw1; + let u2 = s / uw2 + 2.0; + + let vw0 = (4.0 - 3.0 * t); + let vw1 = 7.0; + let vw2 = (1.0 + 3.0 * t); + + let v0 = (3.0 - 2.0 * t) / vw0 - 2.0; + let v1 = (3.0 + t) / vw1; + let v2 = t / vw2 + 2.0; + + var sum = 0.0; + + sum += uw0 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u0, v0) * inv_shadow_map_size), depth, array_index); + sum += uw1 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u1, v0) * inv_shadow_map_size), depth, array_index); + sum += uw2 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u2, v0) * inv_shadow_map_size), depth, array_index); + + sum += uw0 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u0, v1) * inv_shadow_map_size), depth, array_index); + sum += uw1 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u1, v1) * inv_shadow_map_size), depth, array_index); + sum += uw2 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u2, v1) * inv_shadow_map_size), depth, array_index); + + sum += uw0 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u0, v2) * inv_shadow_map_size), depth, array_index); + sum += uw1 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u1, v2) * inv_shadow_map_size), depth, array_index); + sum += uw2 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u2, v2) * inv_shadow_map_size), depth, array_index); + + return sum * (1.0 / 144.0); +} + +fn map(min1: f32, max1: f32, min2: f32, max2: f32, value: f32) -> f32 { + return min2 + (value - min1) * (max2 - min2) / (max1 - min1); +} + +// Creates a random rotation matrix using interleaved gradient noise. 
+// +// See: https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare/ +fn random_rotation_matrix(scale: vec2) -> mat2x2 { + let random_angle = 2.0 * PI * interleaved_gradient_noise( + scale, view_bindings::globals.frame_count); + let m = vec2(sin(random_angle), cos(random_angle)); + return mat2x2( + m.y, -m.x, + m.x, m.y + ); +} + +fn sample_shadow_map_jimenez_fourteen(light_local: vec2, depth: f32, array_index: i32, texel_size: f32) -> f32 { + let shadow_map_size = vec2(textureDimensions(view_bindings::directional_shadow_textures)); + let rotation_matrix = random_rotation_matrix(light_local * shadow_map_size); + + // Empirically chosen fudge factor to make PCF look better across different CSM cascades + let f = map(0.00390625, 0.022949219, 0.015, 0.035, texel_size); + let uv_offset_scale = f / (texel_size * shadow_map_size); + + // https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) + let sample_offset0 = (rotation_matrix * utils::SPIRAL_OFFSET_0_) * uv_offset_scale; + let sample_offset1 = (rotation_matrix * utils::SPIRAL_OFFSET_1_) * uv_offset_scale; + let sample_offset2 = (rotation_matrix * utils::SPIRAL_OFFSET_2_) * uv_offset_scale; + let sample_offset3 = (rotation_matrix * utils::SPIRAL_OFFSET_3_) * uv_offset_scale; + let sample_offset4 = (rotation_matrix * utils::SPIRAL_OFFSET_4_) * uv_offset_scale; + let sample_offset5 = (rotation_matrix * utils::SPIRAL_OFFSET_5_) * uv_offset_scale; + let sample_offset6 = (rotation_matrix * utils::SPIRAL_OFFSET_6_) * uv_offset_scale; + let sample_offset7 = (rotation_matrix * utils::SPIRAL_OFFSET_7_) * uv_offset_scale; + + var sum = 0.0; + sum += sample_shadow_map_hardware(light_local + sample_offset0, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset1, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset2, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset3, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset4, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset5, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset6, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset7, depth, array_index); + return sum / 8.0; +} + +fn sample_shadow_map(light_local: vec2, depth: f32, array_index: i32, texel_size: f32) -> f32 { +#ifdef SHADOW_FILTER_METHOD_GAUSSIAN + return sample_shadow_map_castano_thirteen(light_local, depth, array_index); +#else ifdef SHADOW_FILTER_METHOD_TEMPORAL + return sample_shadow_map_jimenez_fourteen(light_local, depth, array_index, texel_size); +#else ifdef SHADOW_FILTER_METHOD_HARDWARE_2X2 + return sample_shadow_map_hardware(light_local, depth, array_index); +#else + // This needs a default return value to avoid shader compilation errors if it's compiled with no SHADOW_FILTER_METHOD_* defined. + // (eg. if the normal prepass is enabled it ends up compiling this due to the normal prepass depending on pbr_functions, which depends on shadows) + // This should never actually get used, as anyone using bevy's lighting/shadows should always have a SHADOW_FILTER_METHOD defined. + // Set to 0 to make it obvious that something is wrong. 
+ return 0.0; +#endif +} + +// NOTE: Due to the non-uniform control flow in `shadows::fetch_point_shadow`, +// we must use the Level variant of textureSampleCompare to avoid undefined +// behavior due to some of the fragments in a quad (2x2 fragments) being +// processed not being sampled, and this messing with mip-mapping functionality. +// The shadow maps have no mipmaps so Level just samples from LOD 0. +fn sample_shadow_cubemap_hardware(light_local: vec3, depth: f32, light_id: u32) -> f32 { +#ifdef NO_CUBE_ARRAY_TEXTURES_SUPPORT + return textureSampleCompare(view_bindings::point_shadow_textures, view_bindings::point_shadow_textures_sampler, light_local, depth); +#else + return textureSampleCompareLevel(view_bindings::point_shadow_textures, view_bindings::point_shadow_textures_sampler, light_local, i32(light_id), depth); +#endif +} + +fn sample_shadow_cubemap_at_offset( + position: vec2, + coeff: f32, + x_basis: vec3, + y_basis: vec3, + light_local: vec3, + depth: f32, + light_id: u32, +) -> f32 { + return sample_shadow_cubemap_hardware( + light_local + position.x * x_basis + position.y * y_basis, + depth, + light_id + ) * coeff; +} + +// This more or less does what Castano13 does, but in 3D space. Castano13 is +// essentially an optimized 2D Gaussian filter that takes advantage of the +// bilinear filtering hardware to reduce the number of samples needed. This +// trick doesn't apply to cubemaps, so we manually apply a Gaussian filter over +// the standard 8xMSAA pattern instead. +fn sample_shadow_cubemap_gaussian( + light_local: vec3, + depth: f32, + scale: f32, + distance_to_light: f32, + light_id: u32, +) -> f32 { + // Create an orthonormal basis so we can apply a 2D sampling pattern to a + // cubemap. + var up = vec3(0.0, 1.0, 0.0); + if (dot(up, normalize(light_local)) > 0.99) { + up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis. + } + let basis = orthonormalize(light_local, up) * scale * distance_to_light; + + var sum: f32 = 0.0; + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[0], D3D_SAMPLE_POINT_COEFFS[0], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[1], D3D_SAMPLE_POINT_COEFFS[1], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[2], D3D_SAMPLE_POINT_COEFFS[2], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[3], D3D_SAMPLE_POINT_COEFFS[3], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[4], D3D_SAMPLE_POINT_COEFFS[4], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[5], D3D_SAMPLE_POINT_COEFFS[5], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[6], D3D_SAMPLE_POINT_COEFFS[6], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[7], D3D_SAMPLE_POINT_COEFFS[7], + basis[0], basis[1], light_local, depth, light_id); + return sum; +} + +// This is a port of the Jimenez14 filter above to the 3D space. It jitters the +// points in the spiral pattern after first creating a 2D orthonormal basis +// along the principal light direction. 
+fn sample_shadow_cubemap_temporal( + light_local: vec3, + depth: f32, + scale: f32, + distance_to_light: f32, + light_id: u32, +) -> f32 { + // Create an orthonormal basis so we can apply a 2D sampling pattern to a + // cubemap. + var up = vec3(0.0, 1.0, 0.0); + if (dot(up, normalize(light_local)) > 0.99) { + up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis. + } + let basis = orthonormalize(light_local, up) * scale * distance_to_light; + + let rotation_matrix = random_rotation_matrix(vec2(1.0)); + + let sample_offset0 = rotation_matrix * utils::SPIRAL_OFFSET_0_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset1 = rotation_matrix * utils::SPIRAL_OFFSET_1_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset2 = rotation_matrix * utils::SPIRAL_OFFSET_2_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset3 = rotation_matrix * utils::SPIRAL_OFFSET_3_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset4 = rotation_matrix * utils::SPIRAL_OFFSET_4_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset5 = rotation_matrix * utils::SPIRAL_OFFSET_5_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset6 = rotation_matrix * utils::SPIRAL_OFFSET_6_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset7 = rotation_matrix * utils::SPIRAL_OFFSET_7_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + + var sum: f32 = 0.0; + sum += sample_shadow_cubemap_at_offset( + sample_offset0, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset1, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset2, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset3, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset4, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset5, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset6, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset7, 0.125, basis[0], basis[1], light_local, depth, light_id); + return sum; +} + +fn sample_shadow_cubemap( + light_local: vec3, + distance_to_light: f32, + depth: f32, + light_id: u32, +) -> f32 { +#ifdef SHADOW_FILTER_METHOD_GAUSSIAN + return sample_shadow_cubemap_gaussian( + light_local, depth, POINT_SHADOW_SCALE, distance_to_light, light_id); +#else ifdef SHADOW_FILTER_METHOD_TEMPORAL + return sample_shadow_cubemap_temporal( + light_local, depth, POINT_SHADOW_SCALE, distance_to_light, light_id); +#else ifdef SHADOW_FILTER_METHOD_HARDWARE_2X2 + return sample_shadow_cubemap_hardware(light_local, depth, light_id); +#else + // This needs a default return value to avoid shader compilation errors if it's compiled with no SHADOW_FILTER_METHOD_* defined. + // (eg. if the normal prepass is enabled it ends up compiling this due to the normal prepass depending on pbr_functions, which depends on shadows) + // This should never actually get used, as anyone using bevy's lighting/shadows should always have a SHADOW_FILTER_METHOD defined. + // Set to 0 to make it obvious that something is wrong. 
+    return 0.0;
+#endif
+}
+

```

### bevy/crates/bevy_pbr/src/render/pbr_bindings

```rust
+#define_import_path bevy_pbr::pbr_bindings
+
+#import bevy_pbr::pbr_types::StandardMaterial
+
+@group(2) @binding(0) var<uniform> material: StandardMaterial;
+@group(2) @binding(1) var base_color_texture: texture_2d<f32>;
+@group(2) @binding(2) var base_color_sampler: sampler;
+@group(2) @binding(3) var emissive_texture: texture_2d<f32>;
+@group(2) @binding(4) var emissive_sampler: sampler;
+@group(2) @binding(5) var metallic_roughness_texture: texture_2d<f32>;
+@group(2) @binding(6) var metallic_roughness_sampler: sampler;
+@group(2) @binding(7) var occlusion_texture: texture_2d<f32>;
+@group(2) @binding(8) var occlusion_sampler: sampler;
+@group(2) @binding(9) var normal_map_texture: texture_2d<f32>;
+@group(2) @binding(10) var normal_map_sampler: sampler;
+@group(2) @binding(11) var depth_map_texture: texture_2d<f32>;
+@group(2) @binding(12) var depth_map_sampler: sampler;
+#ifdef PBR_ANISOTROPY_TEXTURE_SUPPORTED
+@group(2) @binding(13) var anisotropy_texture: texture_2d<f32>;
+@group(2) @binding(14) var anisotropy_sampler: sampler;
+#endif
+#ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED
+@group(2) @binding(15) var specular_transmission_texture: texture_2d<f32>;
+@group(2) @binding(16) var specular_transmission_sampler: sampler;
+@group(2) @binding(17) var thickness_texture: texture_2d<f32>;
+@group(2) @binding(18) var thickness_sampler: sampler;
+@group(2) @binding(19) var diffuse_transmission_texture: texture_2d<f32>;
+@group(2) @binding(20) var diffuse_transmission_sampler: sampler;
+#endif
+#ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED
+@group(2) @binding(21) var clearcoat_texture: texture_2d<f32>;
+@group(2) @binding(22) var clearcoat_sampler: sampler;
+@group(2) @binding(23) var clearcoat_roughness_texture: texture_2d<f32>;
+@group(2) @binding(24) var clearcoat_roughness_sampler: sampler;
+@group(2) @binding(25) var clearcoat_normal_texture: texture_2d<f32>;
+@group(2) @binding(26) var clearcoat_normal_sampler: sampler;
+#endif
+

```

### bevy/crates/bevy_pbr/src/render/view_transformations

```rust
+#define_import_path bevy_pbr::view_transformations
+
+#import bevy_pbr::mesh_view_bindings as view_bindings
+
+/// World space:
+/// +y is up
+
+/// View space:
+/// -z is forward, +x is right, +y is up
+/// Forward is from the camera position into the scene.
+/// (0.0, 0.0, -1.0) is linear distance of 1.0 in front of the camera's view relative to the camera's rotation +/// (0.0, 1.0, 0.0) is linear distance of 1.0 above the camera's view relative to the camera's rotation + +/// NDC (normalized device coordinate): +/// https://www.w3.org/TR/webgpu/#coordinate-systems +/// (-1.0, -1.0) in NDC is located at the bottom-left corner of NDC +/// (1.0, 1.0) in NDC is located at the top-right corner of NDC +/// Z is depth where: +/// 1.0 is near clipping plane +/// Perspective projection: 0.0 is inf far away +/// Orthographic projection: 0.0 is far clipping plane + +/// UV space: +/// 0.0, 0.0 is the top left +/// 1.0, 1.0 is the bottom right + + +// ----------------- +// TO WORLD -------- +// ----------------- + +/// Convert a view space position to world space +fn position_view_to_world(view_pos: vec3) -> vec3 { + let world_pos = view_bindings::view.world_from_view * vec4(view_pos, 1.0); + return world_pos.xyz; +} + +/// Convert a clip space position to world space +fn position_clip_to_world(clip_pos: vec4) -> vec3 { + let world_pos = view_bindings::view.world_from_clip * clip_pos; + return world_pos.xyz; +} + +/// Convert a ndc space position to world space +fn position_ndc_to_world(ndc_pos: vec3) -> vec3 { + let world_pos = view_bindings::view.world_from_clip * vec4(ndc_pos, 1.0); + return world_pos.xyz / world_pos.w; +} + +/// Convert a view space direction to world space +fn direction_view_to_world(view_dir: vec3) -> vec3 { + let world_dir = view_bindings::view.world_from_view * vec4(view_dir, 0.0); + return world_dir.xyz; +} + +/// Convert a clip space direction to world space +fn direction_clip_to_world(clip_dir: vec4) -> vec3 { + let world_dir = view_bindings::view.world_from_clip * clip_dir; + return world_dir.xyz; +} + +// ----------------- +// TO VIEW --------- +// ----------------- + +/// Convert a world space position to view space +fn position_world_to_view(world_pos: vec3) -> vec3 { + let view_pos = view_bindings::view.view_from_world * vec4(world_pos, 1.0); + return view_pos.xyz; +} + +/// Convert a clip space position to view space +fn position_clip_to_view(clip_pos: vec4) -> vec3 { + let view_pos = view_bindings::view.view_from_clip * clip_pos; + return view_pos.xyz; +} + +/// Convert a ndc space position to view space +fn position_ndc_to_view(ndc_pos: vec3) -> vec3 { + let view_pos = view_bindings::view.view_from_clip * vec4(ndc_pos, 1.0); + return view_pos.xyz / view_pos.w; +} + +/// Convert a world space direction to view space +fn direction_world_to_view(world_dir: vec3) -> vec3 { + let view_dir = view_bindings::view.view_from_world * vec4(world_dir, 0.0); + return view_dir.xyz; +} + +/// Convert a clip space direction to view space +fn direction_clip_to_view(clip_dir: vec4) -> vec3 { + let view_dir = view_bindings::view.view_from_clip * clip_dir; + return view_dir.xyz; +} + +// ----------------- +// TO CLIP --------- +// ----------------- + +/// Convert a world space position to clip space +fn position_world_to_clip(world_pos: vec3) -> vec4 { + let clip_pos = view_bindings::view.clip_from_world * vec4(world_pos, 1.0); + return clip_pos; +} + +/// Convert a view space position to clip space +fn position_view_to_clip(view_pos: vec3) -> vec4 { + let clip_pos = view_bindings::view.clip_from_view * vec4(view_pos, 1.0); + return clip_pos; +} + +/// Convert a world space direction to clip space +fn direction_world_to_clip(world_dir: vec3) -> vec4 { + let clip_dir = view_bindings::view.clip_from_world * vec4(world_dir, 0.0); + return 
clip_dir; +} + +/// Convert a view space direction to clip space +fn direction_view_to_clip(view_dir: vec3) -> vec4 { + let clip_dir = view_bindings::view.clip_from_view * vec4(view_dir, 0.0); + return clip_dir; +} + +// ----------------- +// TO NDC ---------- +// ----------------- + +/// Convert a world space position to ndc space +fn position_world_to_ndc(world_pos: vec3) -> vec3 { + let ndc_pos = view_bindings::view.clip_from_world * vec4(world_pos, 1.0); + return ndc_pos.xyz / ndc_pos.w; +} + +/// Convert a view space position to ndc space +fn position_view_to_ndc(view_pos: vec3) -> vec3 { + let ndc_pos = view_bindings::view.clip_from_view * vec4(view_pos, 1.0); + return ndc_pos.xyz / ndc_pos.w; +} + +// ----------------- +// DEPTH ----------- +// ----------------- + +/// Retrieve the perspective camera near clipping plane +fn perspective_camera_near() -> f32 { + return view_bindings::view.clip_from_view[3][2]; +} + +/// Convert ndc depth to linear view z. +/// Note: Depth values in front of the camera will be negative as -z is forward +fn depth_ndc_to_view_z(ndc_depth: f32) -> f32 { +#ifdef VIEW_PROJECTION_PERSPECTIVE + return -perspective_camera_near() / ndc_depth; +#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC + return -(view_bindings::view.clip_from_view[3][2] - ndc_depth) / view_bindings::view.clip_from_view[2][2]; +#else + let view_pos = view_bindings::view.view_from_clip * vec4(0.0, 0.0, ndc_depth, 1.0); + return view_pos.z / view_pos.w; +#endif +} + +/// Convert linear view z to ndc depth. +/// Note: View z input should be negative for values in front of the camera as -z is forward +fn view_z_to_depth_ndc(view_z: f32) -> f32 { +#ifdef VIEW_PROJECTION_PERSPECTIVE + return -perspective_camera_near() / view_z; +#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC + return view_bindings::view.clip_from_view[3][2] + view_z * view_bindings::view.clip_from_view[2][2]; +#else + let ndc_pos = view_bindings::view.clip_from_view * vec4(0.0, 0.0, view_z, 1.0); + return ndc_pos.z / ndc_pos.w; +#endif +} + +// ----------------- +// UV -------------- +// ----------------- + +/// Convert ndc space xy coordinate [-1.0 .. 1.0] to uv [0.0 .. 1.0] +fn ndc_to_uv(ndc: vec2) -> vec2 { + return ndc * vec2(0.5, -0.5) + vec2(0.5); +} + +/// Convert uv [0.0 .. 1.0] coordinate to ndc space xy [-1.0 .. 1.0] +fn uv_to_ndc(uv: vec2) -> vec2 { + return uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0); +} + +/// returns the (0.0, 0.0) .. (1.0, 1.0) position within the viewport for the current render target +/// [0 .. render target viewport size] eg. [(0.0, 0.0) .. (1280.0, 720.0)] to [(0.0, 0.0) .. (1.0, 1.0)] +fn frag_coord_to_uv(frag_coord: vec2) -> vec2 { + return (frag_coord - view_bindings::view.viewport.xy) / view_bindings::view.viewport.zw; +} + +/// Convert frag coord to ndc +fn frag_coord_to_ndc(frag_coord: vec4) -> vec3 { + return vec3(uv_to_ndc(frag_coord_to_uv(frag_coord.xy)), frag_coord.z); +} + +/// Convert ndc space xy coordinate [-1.0 .. 1.0] to [0 .. 
render target +/// viewport size] +fn ndc_to_frag_coord(ndc: vec2) -> vec2 { + return ndc_to_uv(ndc) * view_bindings::view.viewport.zw; +} + +``` + +### bevy/crates/bevy_pbr/src/render/mesh_functions + +```rust +#define_import_path bevy_pbr::mesh_functions + +#import bevy_pbr::{ + mesh_view_bindings::{ + view, + visibility_ranges, + VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE + }, + mesh_bindings::mesh, + mesh_types::MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT, + view_transformations::position_world_to_clip, +} +#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} + + +fn get_world_from_local(instance_index: u32) -> mat4x4 { + return affine3_to_square(mesh[instance_index].world_from_local); +} + +fn get_previous_world_from_local(instance_index: u32) -> mat4x4 { + return affine3_to_square(mesh[instance_index].previous_world_from_local); +} + +fn mesh_position_local_to_world(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + return world_from_local * vertex_position; +} + +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. +fn mesh_position_local_to_clip(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh_position_local_to_world(world_from_local, vertex_position); + return position_world_to_clip(world_position.xyz); +} + +fn mesh_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { + // NOTE: The mikktspace method of normal mapping requires that the world normal is + // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents + // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. + // We only skip normalization for invalid normals so that they don't become NaN. + // Do not change this code unless you really know what you are doing. + // http://www.mikktspace.com/ + if any(vertex_normal != vec3(0.0)) { + return normalize( + mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].local_from_world_transpose_a, + mesh[instance_index].local_from_world_transpose_b, + ) * vertex_normal + ); + } else { + return vertex_normal; + } +} + +// Calculates the sign of the determinant of the 3x3 model matrix based on a +// mesh flag +fn sign_determinant_model_3x3m(mesh_flags: u32) -> f32 { + // bool(u32) is false if 0u else true + // f32(bool) is 1.0 if true else 0.0 + // * 2.0 - 1.0 remaps 0.0 or 1.0 to -1.0 or 1.0 respectively + return f32(bool(mesh_flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0; +} + +fn mesh_tangent_local_to_world(world_from_local: mat4x4, vertex_tangent: vec4, instance_index: u32) -> vec4 { + // NOTE: The mikktspace method of normal mapping requires that the world tangent is + // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents + // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. + // We only skip normalization for invalid tangents so that they don't become NaN. + // Do not change this code unless you really know what you are doing. 
+ // http://www.mikktspace.com/ + if any(vertex_tangent != vec4(0.0)) { + return vec4( + normalize( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz, + ) * vertex_tangent.xyz + ), + // NOTE: Multiplying by the sign of the determinant of the 3x3 model matrix accounts for + // situations such as negative scaling. + vertex_tangent.w * sign_determinant_model_3x3m(mesh[instance_index].flags) + ); + } else { + return vertex_tangent; + } +} + +// Returns an appropriate dither level for the current mesh instance. +// +// This looks up the LOD range in the `visibility_ranges` table and compares the +// camera distance to determine the dithering level. +#ifdef VISIBILITY_RANGE_DITHER +fn get_visibility_range_dither_level(instance_index: u32, world_position: vec4) -> i32 { +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 + // If we're using a storage buffer, then the length is variable. + let visibility_buffer_array_len = arrayLength(&visibility_ranges); +#else // AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 + // If we're using a uniform buffer, then the length is constant + let visibility_buffer_array_len = VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE; +#endif // AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 + + let visibility_buffer_index = mesh[instance_index].flags & 0xffffu; + if (visibility_buffer_index > visibility_buffer_array_len) { + return -16; + } + + let lod_range = visibility_ranges[visibility_buffer_index]; + let camera_distance = length(view.world_position.xyz - world_position.xyz); + + // This encodes the following mapping: + // + // `lod_range.` x y z w camera distance + // ←───────┼────────┼────────┼────────┼────────→ + // LOD level -16 -16 0 0 16 16 LOD level + let offset = select(-16, 0, camera_distance >= lod_range.z); + let bounds = select(lod_range.xy, lod_range.zw, camera_distance >= lod_range.z); + let level = i32(round((camera_distance - bounds.x) / (bounds.y - bounds.x) * 16.0)); + return offset + clamp(level, 0, 16); +} +#endif + +``` + +### bevy/crates/bevy_pbr/src/render/skinning + +```rust +#define_import_path bevy_pbr::skinning + +#import bevy_pbr::mesh_types::SkinnedMesh + +#ifdef SKINNED + +@group(1) @binding(1) var joint_matrices: SkinnedMesh; + +// An array of matrices specifying the joint positions from the previous frame. +// +// This is used for motion vector computation. +// +// If this is the first frame, or we're otherwise prevented from using data from +// the previous frame, this is simply the same as `joint_matrices` above. +@group(1) @binding(6) var prev_joint_matrices: SkinnedMesh; + +fn skin_model( + indexes: vec4, + weights: vec4, +) -> mat4x4 { + return weights.x * joint_matrices.data[indexes.x] + + weights.y * joint_matrices.data[indexes.y] + + weights.z * joint_matrices.data[indexes.z] + + weights.w * joint_matrices.data[indexes.w]; +} + +// Returns the skinned position of a vertex with the given weights from the +// previous frame. +// +// This is used for motion vector computation. 
+fn skin_prev_model( + indexes: vec4, + weights: vec4, +) -> mat4x4 { + return weights.x * prev_joint_matrices.data[indexes.x] + + weights.y * prev_joint_matrices.data[indexes.y] + + weights.z * prev_joint_matrices.data[indexes.z] + + weights.w * prev_joint_matrices.data[indexes.w]; +} + +fn inverse_transpose_3x3m(in: mat3x3) -> mat3x3 { + let x = cross(in[1], in[2]); + let y = cross(in[2], in[0]); + let z = cross(in[0], in[1]); + let det = dot(in[2], z); + return mat3x3( + x / det, + y / det, + z / det + ); +} + +fn skin_normals( + world_from_local: mat4x4, + normal: vec3, +) -> vec3 { + return normalize( + inverse_transpose_3x3m( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz + ) + ) * normal + ); +} + +#endif + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_types + +```rust +#define_import_path bevy_pbr::pbr_types + +// Since this is a hot path, try to keep the alignment and size of the struct members in mind. +// You can find the alignment and sizes at . +struct StandardMaterial { + base_color: vec4, + emissive: vec4, + attenuation_color: vec4, + uv_transform: mat3x3, + perceptual_roughness: f32, + metallic: f32, + reflectance: f32, + diffuse_transmission: f32, + specular_transmission: f32, + thickness: f32, + ior: f32, + attenuation_distance: f32, + clearcoat: f32, + clearcoat_perceptual_roughness: f32, + anisotropy_strength: f32, + anisotropy_rotation: vec2, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. + flags: u32, + alpha_cutoff: f32, + parallax_depth_scale: f32, + max_parallax_layer_count: f32, + lightmap_exposure: f32, + max_relief_mapping_search_steps: u32, + /// ID for specifying which deferred lighting pass should be used for rendering this material, if any. + deferred_lighting_pass_id: u32, +}; + +// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +// NOTE: if these flags are updated or changed. Be sure to also update +// deferred_flags_from_mesh_material_flags and mesh_material_flags_from_deferred_flags +// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
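+// The low bits below are independent feature flags (1u << n); the alpha mode lives in
+// the top three bits, so each mode value is `mode << 29` (e.g. ALPHA_MODE_MULTIPLY =
+// 5u << 29 = 2684354560u). Shaders recover the mode by masking with
+// STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS, as e.g. `premultiply_alpha` in
+// `pbr_functions` does above.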
+const STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; +const STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; +const STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; +const STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; +const STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; +const STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; +const STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 64u; +const STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 128u; +const STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT: u32 = 256u; +const STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT: u32 = 512u; +const STANDARD_MATERIAL_FLAGS_SPECULAR_TRANSMISSION_TEXTURE_BIT: u32 = 1024u; +const STANDARD_MATERIAL_FLAGS_THICKNESS_TEXTURE_BIT: u32 = 2048u; +const STANDARD_MATERIAL_FLAGS_DIFFUSE_TRANSMISSION_TEXTURE_BIT: u32 = 4096u; +const STANDARD_MATERIAL_FLAGS_ATTENUATION_ENABLED_BIT: u32 = 8192u; +const STANDARD_MATERIAL_FLAGS_CLEARCOAT_TEXTURE_BIT: u32 = 16384u; +const STANDARD_MATERIAL_FLAGS_CLEARCOAT_ROUGHNESS_TEXTURE_BIT: u32 = 32768u; +const STANDARD_MATERIAL_FLAGS_CLEARCOAT_NORMAL_TEXTURE_BIT: u32 = 65536u; +const STANDARD_MATERIAL_FLAGS_ANISOTROPY_TEXTURE_BIT: u32 = 131072u; +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS: u32 = 3758096384u; // (0b111u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 0u; // (0u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 536870912u; // (1u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 1073741824u; // (2u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED: u32 = 1610612736u; // (3u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD: u32 = 2147483648u; // (4u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MULTIPLY: u32 = 2684354560u; // (5u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE: u32 = 3221225472u; // (6u32 << 29) +// ↑ To calculate/verify the values above, use the following playground: +// https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=7792f8dd6fc6a8d4d0b6b1776898a7f4 + + +// Creates a StandardMaterial with default values +fn standard_material_new() -> StandardMaterial { + var material: StandardMaterial; + + // NOTE: Keep in-sync with src/pbr_material.rs! + material.base_color = vec4(1.0, 1.0, 1.0, 1.0); + material.emissive = vec4(0.0, 0.0, 0.0, 1.0); + material.perceptual_roughness = 0.5; + material.metallic = 0.00; + material.reflectance = 0.5; + material.diffuse_transmission = 0.0; + material.specular_transmission = 0.0; + material.thickness = 0.0; + material.ior = 1.5; + material.attenuation_distance = 1.0; + material.attenuation_color = vec4(1.0, 1.0, 1.0, 1.0); + material.clearcoat = 0.0; + material.clearcoat_perceptual_roughness = 0.0; + material.flags = STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE; + material.alpha_cutoff = 0.5; + material.parallax_depth_scale = 0.1; + material.max_parallax_layer_count = 16.0; + material.max_relief_mapping_search_steps = 5u; + material.deferred_lighting_pass_id = 1u; + // scale 1, translation 0, rotation 0 + material.uv_transform = mat3x3(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0); + + return material; +} + +struct PbrInput { + material: StandardMaterial, + // Note: this gets monochromized upon deferred PbrInput reconstruction. + diffuse_occlusion: vec3, + // Note: this is 1.0 (entirely unoccluded) when SSAO and SSR are off. 
+ specular_occlusion: f32, + frag_coord: vec4, + world_position: vec4, + // Normalized world normal used for shadow mapping as normal-mapping is not used for shadow + // mapping + world_normal: vec3, + // Normalized normal-mapped world normal used for lighting + N: vec3, + // Normalized view vector in world space, pointing from the fragment world position toward the + // view world position + V: vec3, + lightmap_light: vec3, + clearcoat_N: vec3, + anisotropy_strength: f32, + // These two aren't specific to anisotropy, but we only fill them in if + // we're doing anisotropy, so they're prefixed with `anisotropy_`. + anisotropy_T: vec3, + anisotropy_B: vec3, + is_orthographic: bool, + flags: u32, +}; + +// Creates a PbrInput with default values +fn pbr_input_new() -> PbrInput { + var pbr_input: PbrInput; + + pbr_input.material = standard_material_new(); + pbr_input.diffuse_occlusion = vec3(1.0); + // If SSAO is enabled, then this gets overwritten with proper specular occlusion. If its not, then we get specular environment map unoccluded (we have no data with which to occlude it with). + pbr_input.specular_occlusion = 1.0; + + pbr_input.frag_coord = vec4(0.0, 0.0, 0.0, 1.0); + pbr_input.world_position = vec4(0.0, 0.0, 0.0, 1.0); + pbr_input.world_normal = vec3(0.0, 0.0, 1.0); + + pbr_input.is_orthographic = false; + + pbr_input.N = vec3(0.0, 0.0, 1.0); + pbr_input.V = vec3(1.0, 0.0, 0.0); + + pbr_input.clearcoat_N = vec3(0.0); + pbr_input.anisotropy_T = vec3(0.0); + pbr_input.anisotropy_B = vec3(0.0); + + pbr_input.lightmap_light = vec3(0.0); + + pbr_input.flags = 0u; + + return pbr_input; +} + +``` + +### bevy/crates/bevy_pbr/src/render/utils + +```rust +#define_import_path bevy_pbr::utils + +#import bevy_pbr::rgb9e5 + +// Generates a random u32 in range [0, u32::MAX]. +// +// `state` is a mutable reference to a u32 used as the seed. +// +// Values are generated via "white noise", with no correlation between values. +// In shaders, you often want spatial and/or temporal correlation. Use a different RNG method for these use cases. +// +// https://www.pcg-random.org +// https://www.reedbeta.com/blog/hash-functions-for-gpu-rendering +fn rand_u(state: ptr) -> u32 { + *state = *state * 747796405u + 2891336453u; + let word = ((*state >> ((*state >> 28u) + 4u)) ^ *state) * 277803737u; + return (word >> 22u) ^ word; +} + +// Generates a random f32 in range [0, 1.0]. +fn rand_f(state: ptr) -> f32 { + *state = *state * 747796405u + 2891336453u; + let word = ((*state >> ((*state >> 28u) + 4u)) ^ *state) * 277803737u; + return f32((word >> 22u) ^ word) * bitcast(0x2f800004u); +} + +// Generates a random vec2 where each value is in range [0, 1.0]. +fn rand_vec2f(state: ptr) -> vec2 { + return vec2(rand_f(state), rand_f(state)); +} + +// Generates a random u32 in range [0, n). +fn rand_range_u(n: u32, state: ptr) -> u32 { + return rand_u(state) % n; +} + +// returns the (0-1, 0-1) position within the given viewport for the current buffer coords . +// buffer coords can be obtained from `@builtin(position).xy`. +// the view uniform struct contains the current camera viewport in `view.viewport`. +// topleft = 0,0 +fn coords_to_viewport_uv(position: vec2, viewport: vec4) -> vec2 { + return (position - viewport.xy) / viewport.zw; +} + +// https://jcgt.org/published/0003/02/01/paper.pdf + +// For encoding normals or unit direction vectors as octahedral coordinates. 
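+// For example, the +Z unit vector (0.0, 0.0, 1.0) encodes to (0.5, 0.5), the centre
+// of the octahedral map, while -Z encodes to a corner; `octahedral_decode` below
+// inverts the mapping for unit-length inputs.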
+fn octahedral_encode(v: vec3) -> vec2 { + var n = v / (abs(v.x) + abs(v.y) + abs(v.z)); + let octahedral_wrap = (1.0 - abs(n.yx)) * select(vec2(-1.0), vec2(1.0), n.xy > vec2f(0.0)); + let n_xy = select(octahedral_wrap, n.xy, n.z >= 0.0); + return n_xy * 0.5 + 0.5; +} + +// For decoding normals or unit direction vectors from octahedral coordinates. +fn octahedral_decode(v: vec2) -> vec3 { + let f = v * 2.0 - 1.0; + var n = vec3(f.xy, 1.0 - abs(f.x) - abs(f.y)); + let t = saturate(-n.z); + let w = select(vec2(t), vec2(-t), n.xy >= vec2(0.0)); + n = vec3(n.xy + w, n.z); + return normalize(n); +} + +// https://blog.demofox.org/2022/01/01/interleaved-gradient-noise-a-different-kind-of-low-discrepancy-sequence +fn interleaved_gradient_noise(pixel_coordinates: vec2, frame: u32) -> f32 { + let xy = pixel_coordinates + 5.588238 * f32(frame % 64u); + return fract(52.9829189 * fract(0.06711056 * xy.x + 0.00583715 * xy.y)); +} + +// https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) +// TODO: Use an array here instead of a bunch of constants, once arrays work properly under DX12. +// NOTE: The names have a final underscore to avoid the following error: +// `Composable module identifiers must not require substitution according to naga writeback rules` +const SPIRAL_OFFSET_0_ = vec2(-0.7071, 0.7071); +const SPIRAL_OFFSET_1_ = vec2(-0.0000, -0.8750); +const SPIRAL_OFFSET_2_ = vec2( 0.5303, 0.5303); +const SPIRAL_OFFSET_3_ = vec2(-0.6250, -0.0000); +const SPIRAL_OFFSET_4_ = vec2( 0.3536, -0.3536); +const SPIRAL_OFFSET_5_ = vec2(-0.0000, 0.3750); +const SPIRAL_OFFSET_6_ = vec2(-0.1768, -0.1768); +const SPIRAL_OFFSET_7_ = vec2( 0.1250, 0.0000); + +``` + +### bevy/crates/bevy_pbr/src/render/mesh_types + +```rust +#define_import_path bevy_pbr::mesh_types + +struct Mesh { + // Affine 4x3 matrices transposed to 3x4 + // Use bevy_render::maths::affine3_to_square to unpack + world_from_local: mat3x4, + previous_world_from_local: mat3x4, + // 3x3 matrix packed in mat2x4 and f32 as: + // [0].xyz, [1].x, + // [1].yz, [2].xy + // [2].z + // Use bevy_pbr::mesh_functions::mat2x4_f32_to_mat3x3_unpack to unpack + local_from_world_transpose_a: mat2x4, + local_from_world_transpose_b: f32, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
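+    // Bit layout (see the MESH_FLAGS_* constants below): bits [0, 16) hold the
+    // visibility range index, bit 29 marks a shadow receiver, bit 30 a transmitted
+    // (transmissive) shadow receiver, and bit 31 stores the sign of the determinant
+    // of the 3x3 model matrix.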
+ flags: u32, + lightmap_uv_rect: vec2, +}; + +#ifdef SKINNED +struct SkinnedMesh { + data: array, 256u>, +}; +#endif + +#ifdef MORPH_TARGETS +struct MorphWeights { + weights: array, 16u>, // 16 = 64 / 4 (64 = MAX_MORPH_WEIGHTS) +}; +#endif + +// [2^0, 2^16) +const MESH_FLAGS_VISIBILITY_RANGE_INDEX_BITS: u32 = 65535u; +// 2^29 +const MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 536870912u; +// 2^30 +const MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT: u32 = 1073741824u; +// 2^31 - if the flag is set, the sign is positive, else it is negative +const MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT: u32 = 2147483648u; + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_lighting + +```rust +#define_import_path bevy_pbr::lighting + +#import bevy_pbr::{ + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, +} +#import bevy_render::maths::PI + +const LAYER_BASE: u32 = 0; +const LAYER_CLEARCOAT: u32 = 1; + +// From the Filament design doc +// https://google.github.io/filament/Filament.html#table_symbols +// Symbol Definition +// v View unit vector +// l Incident light unit vector +// n Surface normal unit vector +// h Half unit vector between l and v +// f BRDF +// f_d Diffuse component of a BRDF +// f_r Specular component of a BRDF +// α Roughness, remapped from using input perceptualRoughness +// σ Diffuse reflectance +// Ω Spherical domain +// f0 Reflectance at normal incidence +// f90 Reflectance at grazing angle +// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) +// nior Index of refraction (IOR) of an interface +// ⟨n⋅l⟩ Dot product clamped to [0..1] +// ⟨a⟩ Saturated value (clamped to [0..1]) + +// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material +// and consists of two components, the diffuse component (f_d) and the specular component (f_r): +// f(v,l) = f_d(v,l) + f_r(v,l) +// +// The form of the microfacet model is the same for diffuse and specular +// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm +// +// In which: +// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets +// G models the visibility (or occlusion or shadow-masking) of the microfacets +// f_m is the microfacet BRDF and differs between specular and diffuse components +// +// The above integration needs to be approximated. + +// Input to a lighting function for a single layer (either the base layer or the +// clearcoat layer). +struct LayerLightingInput { + // The normal vector. + N: vec3, + // The reflected vector. + R: vec3, + // The normal vector ⋅ the view vector. + NdotV: f32, + + // The perceptual roughness of the layer. + perceptual_roughness: f32, + // The roughness of the layer. + roughness: f32, +} + +// Input to a lighting function (`point_light`, `spot_light`, +// `directional_light`). +struct LightingInput { +#ifdef STANDARD_MATERIAL_CLEARCOAT + layers: array, +#else // STANDARD_MATERIAL_CLEARCOAT + layers: array, +#endif // STANDARD_MATERIAL_CLEARCOAT + + // The world-space position. + P: vec3, + // The vector to the view. + V: vec3, + + // The diffuse color of the material. + diffuse_color: vec3, + + // Specular reflectance at the normal incidence angle. + // + // This should be read F₀, but due to Naga limitations we can't name it that. + F0_: vec3, + // Constants for the BRDF approximation. + // + // See `EnvBRDFApprox` in + // . + // What we call `F_ab` they call `AB`. 
+ F_ab: vec2, + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // The strength of the clearcoat layer. + clearcoat_strength: f32, +#endif // STANDARD_MATERIAL_CLEARCOAT + +#ifdef STANDARD_MATERIAL_ANISOTROPY + // The anisotropy strength, reflecting the amount of increased roughness in + // the tangent direction. + anisotropy: f32, + // The tangent direction for anisotropy: i.e. the direction in which + // roughness increases. + Ta: vec3, + // The bitangent direction, which is the cross product of the normal with + // the tangent direction. + Ba: vec3, +#endif // STANDARD_MATERIAL_ANISOTROPY +} + +// Values derived from the `LightingInput` for both diffuse and specular lights. +struct DerivedLightingInput { + // The half-vector between L, the incident light vector, and V, the view + // vector. + H: vec3, + // The normal vector ⋅ the incident light vector. + NdotL: f32, + // The normal vector ⋅ the half-vector. + NdotH: f32, + // The incident light vector ⋅ the half-vector. + LdotH: f32, +} + +// distanceAttenuation is simply the square falloff of light intensity +// combined with a smooth attenuation at the edge of the light radius +// +// light radius is a non-physical construct for efficiency purposes, +// because otherwise every light affects every fragment in the scene +fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { + let factor = distanceSquare * inverseRangeSquared; + let smoothFactor = saturate(1.0 - factor * factor); + let attenuation = smoothFactor * smoothFactor; + return attenuation * 1.0 / max(distanceSquare, 0.0001); +} + +// Normal distribution function (specular D) +// Based on https://google.github.io/filament/Filament.html#citation-walter07 + +// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } + +// Simple implementation, has precision problems when using fp16 instead of fp32 +// see https://google.github.io/filament/Filament.html#listing_speculardfp16 +fn D_GGX(roughness: f32, NdotH: f32, h: vec3) -> f32 { + let oneMinusNdotHSquared = 1.0 - NdotH * NdotH; + let a = NdotH * roughness; + let k = roughness / (oneMinusNdotHSquared + a * a); + let d = k * k * (1.0 / PI); + return d; +} + +// An approximation of the anisotropic GGX distribution function. +// +// 1 +// D(𝐡) = ─────────────────────────────────────────────────── +// παₜα_b((𝐡 ⋅ 𝐭)² / αₜ²) + (𝐡 ⋅ 𝐛)² / α_b² + (𝐡 ⋅ 𝐧)²)² +// +// * `T` = 𝐭 = the tangent direction = the direction of increased roughness. +// +// * `B` = 𝐛 = the bitangent direction = the direction of decreased roughness. +// +// * `at` = αₜ = the alpha-roughness in the tangent direction. +// +// * `ab` = α_b = the alpha-roughness in the bitangent direction. 
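+//
+// As computed in `specular_anisotropy` below, `ab` is derived from the base
+// layer roughness and `at = mix(ab, 1.0, anisotropy * anisotropy)`, so
+// `at >= ab`, and the two coincide when the anisotropy strength is zero.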
+// +// This is from the `KHR_materials_anisotropy` spec: +// +fn D_GGX_anisotropic(at: f32, ab: f32, NdotH: f32, TdotH: f32, BdotH: f32) -> f32 { + let a2 = at * ab; + let f = vec3(ab * TdotH, at * BdotH, a2 * NdotH); + let w2 = a2 / dot(f, f); + let d = a2 * w2 * w2 * (1.0 / PI); + return d; +} + +// Visibility function (Specular G) +// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } +// such that f_r becomes +// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) +// where +// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } +// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv +fn V_SmithGGXCorrelated(roughness: f32, NdotV: f32, NdotL: f32) -> f32 { + let a2 = roughness * roughness; + let lambdaV = NdotL * sqrt((NdotV - a2 * NdotV) * NdotV + a2); + let lambdaL = NdotV * sqrt((NdotL - a2 * NdotL) * NdotL + a2); + let v = 0.5 / (lambdaV + lambdaL); + return v; +} + +// The visibility function, anisotropic variant. +fn V_GGX_anisotropic( + at: f32, + ab: f32, + NdotL: f32, + NdotV: f32, + BdotV: f32, + TdotV: f32, + TdotL: f32, + BdotL: f32, +) -> f32 { + let GGX_V = NdotL * length(vec3(at * TdotV, ab * BdotV, NdotV)); + let GGX_L = NdotV * length(vec3(at * TdotL, ab * BdotL, NdotL)); + let v = 0.5 / (GGX_V + GGX_L); + return saturate(v); +} + +// A simpler, but nonphysical, alternative to Smith-GGX. We use this for +// clearcoat, per the Filament spec. +// +// https://google.github.io/filament/Filament.html#materialsystem/clearcoatmodel#toc4.9.1 +fn V_Kelemen(LdotH: f32) -> f32 { + return 0.25 / (LdotH * LdotH); +} + +// Fresnel function +// see https://google.github.io/filament/Filament.html#citation-schlick94 +// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 +fn F_Schlick_vec(f0: vec3, f90: f32, VdotH: f32) -> vec3 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VdotH, 5.0); +} + +fn F_Schlick(f0: f32, f90: f32, VdotH: f32) -> f32 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VdotH, 5.0); +} + +fn fresnel(f0: vec3, LdotH: f32) -> vec3 { + // f_90 suitable for ambient occlusion + // see https://google.github.io/filament/Filament.html#lighting/occlusion + let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); + return F_Schlick_vec(f0, f90, LdotH); +} + +// Given distribution, visibility, and Fresnel term, calculates the final +// specular light. +// +// Multiscattering approximation: +// +fn specular_multiscatter( + input: ptr, + D: f32, + V: f32, + F: vec3, + specular_intensity: f32, +) -> vec3 { + // Unpack. + let F0 = (*input).F0_; + let F_ab = (*input).F_ab; + + var Fr = (specular_intensity * D * V) * F; + Fr *= 1.0 + F0 * (1.0 / F_ab.x - 1.0); + return Fr; +} + +// Specular BRDF +// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf + +// N, V, and L must all be normalized. +fn derive_lighting_input(N: vec3, V: vec3, L: vec3) -> DerivedLightingInput { + var input: DerivedLightingInput; + var H: vec3 = normalize(L + V); + input.H = H; + input.NdotL = saturate(dot(N, L)); + input.NdotH = saturate(dot(N, H)); + input.LdotH = saturate(dot(L, H)); + return input; +} + +// Returns L in the `xyz` components and the specular intensity in the `w` component. +fn compute_specular_layer_values_for_point_light( + input: ptr, + layer: u32, + V: vec3, + light_to_frag: vec3, + light_position_radius: f32, +) -> vec4 { + // Unpack. 
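+    // (`R` and `a` here belong to the layer selected by `layer`, i.e. either
+    // the base layer or the clearcoat layer.)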
+ let R = (*input).layers[layer].R; + let a = (*input).layers[layer].roughness; + + // Representative Point Area Lights. + // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 + let centerToRay = dot(light_to_frag, R) * R - light_to_frag; + let closestPoint = light_to_frag + centerToRay * saturate( + light_position_radius * inverseSqrt(dot(centerToRay, centerToRay))); + let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); + let normalizationFactor = a / saturate(a + (light_position_radius * 0.5 * LspecLengthInverse)); + let intensity = normalizationFactor * normalizationFactor; + + let L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? + return vec4(L, intensity); +} + +// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m +// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } +fn specular( + input: ptr, + derived_input: ptr, + specular_intensity: f32, +) -> vec3 { + // Unpack. + let roughness = (*input).layers[LAYER_BASE].roughness; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let F0 = (*input).F0_; + let H = (*derived_input).H; + let NdotL = (*derived_input).NdotL; + let NdotH = (*derived_input).NdotH; + let LdotH = (*derived_input).LdotH; + + // Calculate distribution. + let D = D_GGX(roughness, NdotH, H); + // Calculate visibility. + let V = V_SmithGGXCorrelated(roughness, NdotV, NdotL); + // Calculate the Fresnel term. + let F = fresnel(F0, LdotH); + + // Calculate the specular light. + let Fr = specular_multiscatter(input, D, V, F, specular_intensity); + return Fr; +} + +// Calculates the specular light for the clearcoat layer. Returns Fc, the +// Fresnel term, in the first channel, and Frc, the specular clearcoat light, in +// the second channel. +// +// +fn specular_clearcoat( + input: ptr, + derived_input: ptr, + clearcoat_strength: f32, + specular_intensity: f32, +) -> vec2 { + // Unpack. + let roughness = (*input).layers[LAYER_CLEARCOAT].roughness; + let H = (*derived_input).H; + let NdotH = (*derived_input).NdotH; + let LdotH = (*derived_input).LdotH; + + // Calculate distribution. + let Dc = D_GGX(roughness, NdotH, H); + // Calculate visibility. + let Vc = V_Kelemen(LdotH); + // Calculate the Fresnel term. + let Fc = F_Schlick(0.04, 1.0, LdotH) * clearcoat_strength; + // Calculate the specular light. + let Frc = (specular_intensity * Dc * Vc) * Fc; + return vec2(Fc, Frc); +} + +#ifdef STANDARD_MATERIAL_ANISOTROPY + +fn specular_anisotropy( + input: ptr, + derived_input: ptr, + L: vec3, + specular_intensity: f32, +) -> vec3 { + // Unpack. + let roughness = (*input).layers[LAYER_BASE].roughness; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let V = (*input).V; + let F0 = (*input).F0_; + let anisotropy = (*input).anisotropy; + let Ta = (*input).Ta; + let Ba = (*input).Ba; + let H = (*derived_input).H; + let NdotL = (*derived_input).NdotL; + let NdotH = (*derived_input).NdotH; + let LdotH = (*derived_input).LdotH; + + let TdotL = dot(Ta, L); + let BdotL = dot(Ba, L); + let TdotH = dot(Ta, H); + let BdotH = dot(Ba, H); + let TdotV = dot(Ta, V); + let BdotV = dot(Ba, V); + + let ab = roughness * roughness; + let at = mix(ab, 1.0, anisotropy * anisotropy); + + let Da = D_GGX_anisotropic(at, ab, NdotH, TdotH, BdotH); + let Va = V_GGX_anisotropic(at, ab, NdotL, NdotV, BdotV, TdotV, TdotL, BdotL); + let Fa = fresnel(F0, LdotH); + + // Calculate the specular light. 
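+    // The anisotropic D and V terms replace their isotropic counterparts; the
+    // Fresnel term and the multiscatter compensation are the same as in the
+    // isotropic `specular` path above.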
+ let Fr = specular_multiscatter(input, Da, Va, Fa, specular_intensity); + return Fr; +} + +#endif // STANDARD_MATERIAL_ANISOTROPY + +// Diffuse BRDF +// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf +// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm +// +// simplest approximation +// float Fd_Lambert() { +// return 1.0 / PI; +// } +// +// vec3 Fd = diffuseColor * Fd_Lambert(); +// +// Disney approximation +// See https://google.github.io/filament/Filament.html#citation-burley12 +// minimal quality difference +fn Fd_Burley( + input: ptr, + derived_input: ptr, +) -> f32 { + // Unpack. + let roughness = (*input).layers[LAYER_BASE].roughness; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let NdotL = (*derived_input).NdotL; + let LdotH = (*derived_input).LdotH; + + let f90 = 0.5 + 2.0 * roughness * LdotH * LdotH; + let lightScatter = F_Schlick(1.0, f90, NdotL); + let viewScatter = F_Schlick(1.0, f90, NdotV); + return lightScatter * viewScatter * (1.0 / PI); +} + +// Scale/bias approximation +// https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile +// TODO: Use a LUT (more accurate) +fn F_AB(perceptual_roughness: f32, NdotV: f32) -> vec2 { + let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); + let c1 = vec4(1.0, 0.0425, 1.04, -0.04); + let r = perceptual_roughness * c0 + c1; + let a004 = min(r.x * r.x, exp2(-9.28 * NdotV)) * r.x + r.y; + return vec2(-1.04, 1.04) * a004 + r.zw; +} + +fn EnvBRDFApprox(F0: vec3, F_ab: vec2) -> vec3 { + return F0 * F_ab.x + F_ab.y; +} + +fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { + // clamp perceptual roughness to prevent precision problems + // According to Filament design 0.089 is recommended for mobile + // Filament uses 0.045 for non-mobile + let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); + return clampedPerceptualRoughness * clampedPerceptualRoughness; +} + +fn point_light(light_id: u32, input: ptr) -> vec3 { + // Unpack. + let diffuse_color = (*input).diffuse_color; + let P = (*input).P; + let N = (*input).layers[LAYER_BASE].N; + let V = (*input).V; + + let light = &view_bindings::clusterable_objects.data[light_id]; + let light_to_frag = (*light).position_radius.xyz - P; + let L = normalize(light_to_frag); + let distance_square = dot(light_to_frag, light_to_frag); + let rangeAttenuation = getDistanceAttenuation(distance_square, (*light).color_inverse_square_range.w); + + // Base layer + + let specular_L_intensity = compute_specular_layer_values_for_point_light( + input, + LAYER_BASE, + V, + light_to_frag, + (*light).position_radius.w, + ); + var specular_derived_input = derive_lighting_input(N, V, specular_L_intensity.xyz); + + let specular_intensity = specular_L_intensity.w; + +#ifdef STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular_anisotropy(input, &specular_derived_input, L, specular_intensity); +#else // STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular(input, &specular_derived_input, specular_intensity); +#endif // STANDARD_MATERIAL_ANISOTROPY + + // Clearcoat + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Unpack. + let clearcoat_N = (*input).layers[LAYER_CLEARCOAT].N; + let clearcoat_strength = (*input).clearcoat_strength; + + // Perform specular input calculations again for the clearcoat layer. We + // can't reuse the above because the clearcoat normal might be different + // from the main layer normal. 
+ let clearcoat_specular_L_intensity = compute_specular_layer_values_for_point_light( + input, + LAYER_CLEARCOAT, + V, + light_to_frag, + (*light).position_radius.w, + ); + var clearcoat_specular_derived_input = + derive_lighting_input(clearcoat_N, V, clearcoat_specular_L_intensity.xyz); + + // Calculate the specular light. + let clearcoat_specular_intensity = clearcoat_specular_L_intensity.w; + let Fc_Frc = specular_clearcoat( + input, + &clearcoat_specular_derived_input, + clearcoat_strength, + clearcoat_specular_intensity + ); + let inv_Fc = 1.0 - Fc_Frc.r; // Inverse Fresnel term. + let Frc = Fc_Frc.g; // Clearcoat light. +#endif // STANDARD_MATERIAL_CLEARCOAT + + // Diffuse. + // Comes after specular since its N⋅L is used in the lighting equation. + var derived_input = derive_lighting_input(N, V, L); + let diffuse = diffuse_color * Fd_Burley(input, &derived_input); + + // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation + // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ + // where + // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color + // Φ is luminous power in lumens + // our rangeAttenuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius + + // For a point light, luminous intensity, I, in lumens per steradian is given by: + // I = Φ / 4 π + // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower + + // NOTE: (*light).color.rgb is premultiplied with (*light).intensity / 4 π (which would be the luminous intensity) on the CPU + + var color: vec3; +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Account for the Fresnel term from the clearcoat darkening the main layer. + // + // + color = (diffuse + specular_light * inv_Fc) * inv_Fc + Frc; +#else // STANDARD_MATERIAL_CLEARCOAT + color = diffuse + specular_light; +#endif // STANDARD_MATERIAL_CLEARCOAT + + return color * (*light).color_inverse_square_range.rgb * + (rangeAttenuation * derived_input.NdotL); +} + +fn spot_light(light_id: u32, input: ptr) -> vec3 { + // reuse the point light calculations + let point_light = point_light(light_id, input); + + let light = &view_bindings::clusterable_objects.data[light_id]; + + // reconstruct spot dir from x/z and y-direction flag + var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); + spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); + if ((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { + spot_dir.y = -spot_dir.y; + } + let light_to_frag = (*light).position_radius.xyz - (*input).P.xyz; + + // calculate attenuation based on filament formula https://google.github.io/filament/Filament.html#listing_glslpunctuallight + // spot_scale and spot_offset have been precomputed + // note we normalize here to get "l" from the filament listing. spot_dir is already normalized + let cd = dot(-spot_dir, normalize(light_to_frag)); + let attenuation = saturate(cd * (*light).light_custom_data.z + (*light).light_custom_data.w); + let spot_attenuation = attenuation * attenuation; + + return point_light * spot_attenuation; +} + +fn directional_light(light_id: u32, input: ptr) -> vec3 { + // Unpack. 
+ let diffuse_color = (*input).diffuse_color; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let N = (*input).layers[LAYER_BASE].N; + let V = (*input).V; + let roughness = (*input).layers[LAYER_BASE].roughness; + + let light = &view_bindings::lights.directional_lights[light_id]; + + let L = (*light).direction_to_light.xyz; + var derived_input = derive_lighting_input(N, V, L); + + let diffuse = diffuse_color * Fd_Burley(input, &derived_input); + +#ifdef STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular_anisotropy(input, &derived_input, L, 1.0); +#else // STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular(input, &derived_input, 1.0); +#endif // STANDARD_MATERIAL_ANISOTROPY + +#ifdef STANDARD_MATERIAL_CLEARCOAT + let clearcoat_N = (*input).layers[LAYER_CLEARCOAT].N; + let clearcoat_strength = (*input).clearcoat_strength; + + // Perform specular input calculations again for the clearcoat layer. We + // can't reuse the above because the clearcoat normal might be different + // from the main layer normal. + var derived_clearcoat_input = derive_lighting_input(clearcoat_N, V, L); + + let Fc_Frc = + specular_clearcoat(input, &derived_clearcoat_input, clearcoat_strength, 1.0); + let inv_Fc = 1.0 - Fc_Frc.r; + let Frc = Fc_Frc.g; +#endif // STANDARD_MATERIAL_CLEARCOAT + + var color: vec3; +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Account for the Fresnel term from the clearcoat darkening the main layer. + // + // + color = (diffuse + specular_light * inv_Fc) * inv_Fc * derived_input.NdotL + + Frc * derived_clearcoat_input.NdotL; +#else // STANDARD_MATERIAL_CLEARCOAT + color = (diffuse + specular_light) * derived_input.NdotL; +#endif // STANDARD_MATERIAL_CLEARCOAT + + return color * (*light).color.rgb; +} + +``` + +### bevy/crates/bevy_pbr/src/render/pbr + +```rust +#import bevy_pbr::{ + pbr_functions::alpha_discard, + pbr_fragment::pbr_input_from_standard_material, +} + +#ifdef PREPASS_PIPELINE +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, +} +#else +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, +} +#endif + +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output +#endif + +@fragment +fn fragment( +#ifdef MESHLET_MESH_MATERIAL_PASS + @builtin(position) frag_coord: vec4, +#else + in: VertexOutput, + @builtin(front_facing) is_front: bool, +#endif +) -> FragmentOutput { +#ifdef MESHLET_MESH_MATERIAL_PASS + let in = resolve_vertex_output(frag_coord); + let is_front = true; +#endif + + // If we're in the crossfade section of a visibility range, conditionally + // discard the fragment according to the visibility pattern. +#ifdef VISIBILITY_RANGE_DITHER + pbr_functions::visibility_range_dither(in.position, in.visibility_range_dither); +#endif + + // generate a PbrInput struct from the StandardMaterial bindings + var pbr_input = pbr_input_from_standard_material(in, is_front); + + // alpha discard + pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); + +#ifdef PREPASS_PIPELINE + // write the gbuffer, lighting pass id, and optionally normal and motion_vector textures + let out = deferred_output(in, pbr_input); +#else + // in forward mode, we calculate the lit color immediately, and then apply some post-lighting effects here. 
+ // in deferred mode the lit color and these effects will be calculated in the deferred lighting shader + var out: FragmentOutput; + if (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + out.color = apply_pbr_lighting(pbr_input); + } else { + out.color = pbr_input.material.base_color; + } + + // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) + // note this does not include fullscreen postprocessing effects like bloom. + out.color = main_pass_post_lighting_processing(pbr_input, out.color); +#endif + + return out; +} + +``` + +### bevy/crates/bevy_pbr/src/render/parallax_mapping + +```rust +#define_import_path bevy_pbr::parallax_mapping + +#import bevy_pbr::pbr_bindings::{depth_map_texture, depth_map_sampler} + +fn sample_depth_map(uv: vec2) -> f32 { + // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 + // backend (Fxc) panics when using "gradient instructions" inside a loop. + // It results in the whole loop being unrolled by the shader compiler, + // which it can't do because the upper limit of the loop in steep parallax + // mapping is a variable set by the user. + // The "gradient instructions" comes from `textureSample` computing MIP level + // based on UV derivative. With `textureSampleLevel`, we provide ourselves + // the MIP level, so no gradient instructions are used, and we can use + // sample_depth_map in our loop. + // See https://stackoverflow.com/questions/56581141/direct3d11-gradient-instruction-used-in-a-loop-with-varying-iteration-forcing + return textureSampleLevel(depth_map_texture, depth_map_sampler, uv, 0.0).r; +} + +// An implementation of parallax mapping, see https://en.wikipedia.org/wiki/Parallax_mapping +// Code derived from: https://web.archive.org/web/20150419215321/http://sunandblackcat.com/tipFullView.php?l=eng&topicid=28 +fn parallaxed_uv( + depth_scale: f32, + max_layer_count: f32, + max_steps: u32, + // The original interpolated uv + original_uv: vec2, + // The vector from the camera to the fragment at the surface in tangent space + Vt: vec3, +) -> vec2 { + if max_layer_count < 1.0 { + return original_uv; + } + var uv = original_uv; + + // Steep Parallax Mapping + // ====================== + // Split the depth map into `layer_count` layers. + // When Vt hits the surface of the mesh (excluding depth displacement), + // if the depth is not below or on surface including depth displacement (textureSample), then + // look forward (+= delta_uv) on depth texture according to + // Vt and distance between hit surface and depth map surface, + // repeat until below the surface. + // + // Where `layer_count` is interpolated between `1.0` and + // `max_layer_count` according to the steepness of Vt. + + let view_steepness = abs(Vt.z); + // We mix with minimum value 1.0 because otherwise, + // with 0.0, we get a division by zero in surfaces parallel to viewport, + // resulting in a singularity. 
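+    // For example, looking straight at the surface gives `view_steepness == 1.0`
+    // and therefore a single layer, while grazing angles approach
+    // `max_layer_count` layers.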
+ let layer_count = mix(max_layer_count, 1.0, view_steepness); + let layer_depth = 1.0 / layer_count; + var delta_uv = depth_scale * layer_depth * Vt.xy * vec2(1.0, -1.0) / view_steepness; + + var current_layer_depth = 0.0; + var texture_depth = sample_depth_map(uv); + + // texture_depth > current_layer_depth means the depth map depth is deeper + // than the depth the ray would be at this UV offset so the ray has not + // intersected the surface + for (var i: i32 = 0; texture_depth > current_layer_depth && i <= i32(layer_count); i++) { + current_layer_depth += layer_depth; + uv += delta_uv; + texture_depth = sample_depth_map(uv); + } + +#ifdef RELIEF_MAPPING + // Relief Mapping + // ============== + // "Refine" the rough result from Steep Parallax Mapping + // with a **binary search** between the layer selected by steep parallax + // and the next one to find a point closer to the depth map surface. + // This reduces the jaggy step artifacts from steep parallax mapping. + + delta_uv *= 0.5; + var delta_depth = 0.5 * layer_depth; + + uv -= delta_uv; + current_layer_depth -= delta_depth; + + for (var i: u32 = 0u; i < max_steps; i++) { + texture_depth = sample_depth_map(uv); + + // Halve the deltas for the next step + delta_uv *= 0.5; + delta_depth *= 0.5; + + // Step based on whether the current depth is above or below the depth map + if (texture_depth > current_layer_depth) { + uv += delta_uv; + current_layer_depth += delta_depth; + } else { + uv -= delta_uv; + current_layer_depth -= delta_depth; + } + } +#else + // Parallax Occlusion mapping + // ========================== + // "Refine" Steep Parallax Mapping by interpolating between the + // previous layer's depth and the computed layer depth. + // Only requires a single lookup, unlike Relief Mapping, but + // may skip small details and result in writhing material artifacts. + let previous_uv = uv - delta_uv; + let next_depth = texture_depth - current_layer_depth; + let previous_depth = sample_depth_map(previous_uv) - current_layer_depth + layer_depth; + + let weight = next_depth / (next_depth - previous_depth); + + uv = mix(uv, previous_uv, weight); + + current_layer_depth += mix(next_depth, previous_depth, weight); +#endif + + // Note: `current_layer_depth` is not returned, but may be useful + // for light computation later on in future improvements of the pbr shader. 
+ return uv; +} + +``` + +### bevy/crates/bevy_pbr/src/render/fog + +```rust +#define_import_path bevy_pbr::fog + +#import bevy_pbr::{ + mesh_view_bindings::fog, + mesh_view_types::Fog, +} + +// Fog formulas adapted from: +// https://learn.microsoft.com/en-us/windows/win32/direct3d9/fog-formulas +// https://catlikecoding.com/unity/tutorials/rendering/part-14/ +// https://iquilezles.org/articles/fog/ (Atmospheric Fog and Scattering) + +fn scattering_adjusted_fog_color( + fog_params: Fog, + scattering: vec3, +) -> vec4 { + if (fog_params.directional_light_color.a > 0.0) { + return vec4( + fog_params.base_color.rgb + + scattering * fog_params.directional_light_color.rgb * fog_params.directional_light_color.a, + fog_params.base_color.a, + ); + } else { + return fog_params.base_color; + } +} + +fn linear_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let start = fog_params.be.x; + let end = fog_params.be.y; + fog_color.a *= 1.0 - clamp((end - distance) / (end - start), 0.0, 1.0); + return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); +} + +fn exponential_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let density = fog_params.be.x; + fog_color.a *= 1.0 - 1.0 / exp(distance * density); + return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); +} + +fn exponential_squared_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let distance_times_density = distance * fog_params.be.x; + fog_color.a *= 1.0 - 1.0 / exp(distance_times_density * distance_times_density); + return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); +} + +fn atmospheric_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let extinction_factor = 1.0 - 1.0 / exp(distance * fog_params.be); + let inscattering_factor = 1.0 - 1.0 / exp(distance * fog_params.bi); + return vec4( + input_color.rgb * (1.0 - extinction_factor * fog_color.a) + + fog_color.rgb * inscattering_factor * fog_color.a, + input_color.a + ); +} + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_prepass + +```rust +#import bevy_pbr::{ + pbr_prepass_functions, + pbr_bindings, + pbr_bindings::material, + pbr_types, + pbr_functions, + pbr_functions::SampleBias, + prepass_io, + mesh_view_bindings::view, +} + +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output +#endif + +#ifdef PREPASS_FRAGMENT +@fragment +fn fragment( +#ifdef MESHLET_MESH_MATERIAL_PASS + @builtin(position) frag_coord: vec4, +#else + in: prepass_io::VertexOutput, + @builtin(front_facing) is_front: bool, +#endif +) -> prepass_io::FragmentOutput { +#ifdef MESHLET_MESH_MATERIAL_PASS + let in = resolve_vertex_output(frag_coord); + let is_front = true; +#else + pbr_prepass_functions::prepass_alpha_discard(in); +#endif + + var out: prepass_io::FragmentOutput; + +#ifdef DEPTH_CLAMP_ORTHO + out.frag_depth = in.clip_position_unclamped.z; +#endif // DEPTH_CLAMP_ORTHO + +#ifdef NORMAL_PREPASS + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if (material.flags & 
pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + let double_sided = (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; + + let world_normal = pbr_functions::prepare_world_normal( + in.world_normal, + double_sided, + is_front, + ); + + var normal = world_normal; + +#ifdef VERTEX_UVS +#ifdef VERTEX_TANGENTS +#ifdef STANDARD_MATERIAL_NORMAL_MAP + +#ifdef STANDARD_MATERIAL_NORMAL_MAP_UV_B + let uv = (material.uv_transform * vec3(in.uv_b, 1.0)).xy; +#else + let uv = (material.uv_transform * vec3(in.uv, 1.0)).xy; +#endif + + // Fill in the sample bias so we can sample from textures. + var bias: SampleBias; +#ifdef MESHLET_MESH_MATERIAL_PASS + bias.ddx_uv = in.ddx_uv; + bias.ddy_uv = in.ddy_uv; +#else // MESHLET_MESH_MATERIAL_PASS + bias.mip_bias = view.mip_bias; +#endif // MESHLET_MESH_MATERIAL_PASS + + let Nt = pbr_functions::sample_texture( + pbr_bindings::normal_map_texture, + pbr_bindings::normal_map_sampler, + uv, + bias, + ).rgb; + let TBN = pbr_functions::calculate_tbn_mikktspace(normal, in.world_tangent); + + normal = pbr_functions::apply_normal_mapping( + material.flags, + TBN, + double_sided, + is_front, + Nt, + ); + +#endif // STANDARD_MATERIAL_NORMAL_MAP +#endif // VERTEX_TANGENTS +#endif // VERTEX_UVS + + out.normal = vec4(normal * 0.5 + vec3(0.5), 1.0); + } else { + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); + } +#endif // NORMAL_PREPASS + +#ifdef MOTION_VECTOR_PREPASS +#ifdef MESHLET_MESH_MATERIAL_PASS + out.motion_vector = in.motion_vector; +#else + out.motion_vector = pbr_prepass_functions::calculate_motion_vector(in.world_position, in.previous_world_position); +#endif +#endif + + return out; +} +#else +@fragment +fn fragment(in: prepass_io::VertexOutput) { + pbr_prepass_functions::prepass_alpha_discard(in); +} +#endif // PREPASS_FRAGMENT + +``` + +### bevy/crates/bevy_pbr/src/render/mesh_preprocess + +```rust +// GPU mesh uniform building. +// +// This is a compute shader that expands each `MeshInputUniform` out to a full +// `MeshUniform` for each view before rendering. (Thus `MeshInputUniform` +// and `MeshUniform` are in a 1:N relationship.) It runs in parallel for all +// meshes for all views. As part of this process, the shader gathers each +// mesh's transform on the previous frame and writes it into the `MeshUniform` +// so that TAA works. + +#import bevy_pbr::mesh_types::Mesh +#import bevy_render::maths +#import bevy_render::view::View + +// Per-frame data that the CPU supplies to the GPU. +struct MeshInput { + // The model transform. + world_from_local: mat3x4, + // The lightmap UV rect, packed into 64 bits. + lightmap_uv_rect: vec2, + // Various flags. + flags: u32, + // The index of this mesh's `MeshInput` in the `previous_input` array, if + // applicable. If not present, this is `u32::MAX`. + previous_input_index: u32, +} + +// Information about each mesh instance needed to cull it on GPU. +// +// At the moment, this just consists of its axis-aligned bounding box (AABB). +struct MeshCullingData { + // The 3D center of the AABB in model space, padded with an extra unused + // float value. + aabb_center: vec4, + // The 3D extents of the AABB in model space, divided by two, padded with + // an extra unused float value. + aabb_half_extents: vec4, +} + +// One invocation of this compute shader: i.e. one mesh instance in a view. +struct PreprocessWorkItem { + // The index of the `MeshInput` in the `current_input` buffer that we read + // from. 
+ input_index: u32, + // In direct mode, the index of the `Mesh` in `output` that we write to. In + // indirect mode, the index of the `IndirectParameters` in + // `indirect_parameters` that we write to. + output_index: u32, +} + +// The `wgpu` indirect parameters structure. This is a union of two structures. +// For more information, see the corresponding comment in +// `gpu_preprocessing.rs`. +struct IndirectParameters { + // `vertex_count` or `index_count`. + data0: u32, + // `instance_count` in both structures. + instance_count: atomic, + // `first_vertex` in both structures. + first_vertex: u32, + // `first_instance` or `base_vertex`. + data1: u32, + // A read-only copy of `instance_index`. + instance_index: u32, +} + +// The current frame's `MeshInput`. +@group(0) @binding(0) var current_input: array; +// The `MeshInput` values from the previous frame. +@group(0) @binding(1) var previous_input: array; +// Indices into the `MeshInput` buffer. +// +// There may be many indices that map to the same `MeshInput`. +@group(0) @binding(2) var work_items: array; +// The output array of `Mesh`es. +@group(0) @binding(3) var output: array; + +#ifdef INDIRECT +// The array of indirect parameters for drawcalls. +@group(0) @binding(4) var indirect_parameters: array; +#endif + +#ifdef FRUSTUM_CULLING +// Data needed to cull the meshes. +// +// At the moment, this consists only of AABBs. +@group(0) @binding(5) var mesh_culling_data: array; + +// The view data, including the view matrix. +@group(0) @binding(6) var view: View; + +// Returns true if the view frustum intersects an oriented bounding box (OBB). +// +// `aabb_center.w` should be 1.0. +fn view_frustum_intersects_obb( + world_from_local: mat4x4, + aabb_center: vec4, + aabb_half_extents: vec3, +) -> bool { + + for (var i = 0; i < 5; i += 1) { + // Calculate relative radius of the sphere associated with this plane. + let plane_normal = view.frustum[i]; + let relative_radius = dot( + abs( + vec3( + dot(plane_normal, world_from_local[0]), + dot(plane_normal, world_from_local[1]), + dot(plane_normal, world_from_local[2]), + ) + ), + aabb_half_extents + ); + + // Check the frustum plane. + if (!maths::sphere_intersects_plane_half_space( + plane_normal, aabb_center, relative_radius)) { + return false; + } + } + + return true; +} +#endif + +@compute +@workgroup_size(64) +fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { + // Figure out our instance index. If this thread doesn't correspond to any + // index, bail. + let instance_index = global_invocation_id.x; + if (instance_index >= arrayLength(&work_items)) { + return; + } + + // Unpack. + let input_index = work_items[instance_index].input_index; + let output_index = work_items[instance_index].output_index; + let world_from_local_affine_transpose = current_input[input_index].world_from_local; + let world_from_local = maths::affine3_to_square(world_from_local_affine_transpose); + + // Cull if necessary. +#ifdef FRUSTUM_CULLING + let aabb_center = mesh_culling_data[input_index].aabb_center.xyz; + let aabb_half_extents = mesh_culling_data[input_index].aabb_half_extents.xyz; + + // Do an OBB-based frustum cull. + let model_center = world_from_local * vec4(aabb_center, 1.0); + if (!view_frustum_intersects_obb(world_from_local, model_center, aabb_half_extents)) { + return; + } +#endif + + // Calculate inverse transpose. + let local_from_world_transpose = transpose(maths::inverse_affine3(transpose( + world_from_local_affine_transpose))); + + // Pack inverse transpose. 
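+    // The 3x3 inverse transpose is packed into a mat2x4 plus an f32, matching
+    // the layout documented on `Mesh` in `bevy_pbr::mesh_types`:
+    //   [0].xyz, [1].x,
+    //   [1].yz, [2].xy
+    //   [2].z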
+ let local_from_world_transpose_a = mat2x4( + vec4(local_from_world_transpose[0].xyz, local_from_world_transpose[1].x), + vec4(local_from_world_transpose[1].yz, local_from_world_transpose[2].xy)); + let local_from_world_transpose_b = local_from_world_transpose[2].z; + + // Look up the previous model matrix. + let previous_input_index = current_input[input_index].previous_input_index; + var previous_world_from_local: mat3x4; + if (previous_input_index == 0xffffffff) { + previous_world_from_local = world_from_local_affine_transpose; + } else { + previous_world_from_local = previous_input[previous_input_index].world_from_local; + } + + // Figure out the output index. In indirect mode, this involves bumping the + // instance index in the indirect parameters structure. Otherwise, this + // index was directly supplied to us. +#ifdef INDIRECT + let mesh_output_index = indirect_parameters[output_index].instance_index + + atomicAdd(&indirect_parameters[output_index].instance_count, 1u); +#else + let mesh_output_index = output_index; +#endif + + // Write the output. + output[mesh_output_index].world_from_local = world_from_local_affine_transpose; + output[mesh_output_index].previous_world_from_local = previous_world_from_local; + output[mesh_output_index].local_from_world_transpose_a = local_from_world_transpose_a; + output[mesh_output_index].local_from_world_transpose_b = local_from_world_transpose_b; + output[mesh_output_index].flags = current_input[input_index].flags; + output[mesh_output_index].lightmap_uv_rect = current_input[input_index].lightmap_uv_rect; +} + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_fragment + +```rust +#define_import_path bevy_pbr::pbr_fragment + +#import bevy_pbr::{ + pbr_functions, + pbr_functions::SampleBias, + pbr_bindings, + pbr_types, + prepass_utils, + lighting, + mesh_bindings::mesh, + mesh_view_bindings::view, + parallax_mapping::parallaxed_uv, + lightmap::lightmap, +} + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION +#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture +#import bevy_pbr::gtao_utils::gtao_multibounce +#endif + +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::VertexOutput +#else ifdef PREPASS_PIPELINE +#import bevy_pbr::prepass_io::VertexOutput +#else +#import bevy_pbr::forward_io::VertexOutput +#endif + +// prepare a basic PbrInput from the vertex stage output, mesh binding and view binding +fn pbr_input_from_vertex_output( + in: VertexOutput, + is_front: bool, + double_sided: bool, +) -> pbr_types::PbrInput { + var pbr_input: pbr_types::PbrInput = pbr_types::pbr_input_new(); + +#ifdef MESHLET_MESH_MATERIAL_PASS + pbr_input.flags = in.mesh_flags; +#else + pbr_input.flags = mesh[in.instance_index].flags; +#endif + + pbr_input.is_orthographic = view.clip_from_view[3].w == 1.0; + pbr_input.V = pbr_functions::calculate_view(in.world_position, pbr_input.is_orthographic); + pbr_input.frag_coord = in.position; + pbr_input.world_position = in.world_position; + +#ifdef VERTEX_COLORS + pbr_input.material.base_color = in.color; +#endif + + pbr_input.world_normal = pbr_functions::prepare_world_normal( + in.world_normal, + double_sided, + is_front, + ); + +#ifdef LOAD_PREPASS_NORMALS + pbr_input.N = prepass_utils::prepass_normal(in.position, 0u); +#else + pbr_input.N = normalize(pbr_input.world_normal); +#endif + + return pbr_input; +} + +// Prepare a full PbrInput by sampling all textures to resolve +// the material members +fn pbr_input_from_standard_material( + in: VertexOutput, + is_front: bool, 
+) -> pbr_types::PbrInput { + let double_sided = (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; + + var pbr_input: pbr_types::PbrInput = pbr_input_from_vertex_output(in, is_front, double_sided); + pbr_input.material.flags = pbr_bindings::material.flags; + pbr_input.material.base_color *= pbr_bindings::material.base_color; + pbr_input.material.deferred_lighting_pass_id = pbr_bindings::material.deferred_lighting_pass_id; + + // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" + let NdotV = max(dot(pbr_input.N, pbr_input.V), 0.0001); + + // Fill in the sample bias so we can sample from textures. + var bias: SampleBias; +#ifdef MESHLET_MESH_MATERIAL_PASS + bias.ddx_uv = in.ddx_uv; + bias.ddy_uv = in.ddy_uv; +#else // MESHLET_MESH_MATERIAL_PASS + bias.mip_bias = view.mip_bias; +#endif // MESHLET_MESH_MATERIAL_PASS + +#ifdef VERTEX_UVS + let uv_transform = pbr_bindings::material.uv_transform; +#ifdef VERTEX_UVS_A + var uv = (uv_transform * vec3(in.uv, 1.0)).xy; +#endif + +#ifdef VERTEX_UVS_B + var uv_b = (uv_transform * vec3(in.uv_b, 1.0)).xy; +#else + var uv_b = uv; +#endif + +#ifdef VERTEX_TANGENTS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT) != 0u) { + let V = pbr_input.V; + let N = in.world_normal; + let T = in.world_tangent.xyz; + let B = in.world_tangent.w * cross(N, T); + // Transform V from fragment to camera in world space to tangent space. + let Vt = vec3(dot(V, T), dot(V, B), dot(V, N)); +#ifdef VERTEX_UVS_A + uv = parallaxed_uv( + pbr_bindings::material.parallax_depth_scale, + pbr_bindings::material.max_parallax_layer_count, + pbr_bindings::material.max_relief_mapping_search_steps, + uv, + // Flip the direction of Vt to go toward the surface to make the + // parallax mapping algorithm easier to understand and reason + // about. + -Vt, + ); +#endif + +#ifdef VERTEX_UVS_B + uv_b = parallaxed_uv( + pbr_bindings::material.parallax_depth_scale, + pbr_bindings::material.max_parallax_layer_count, + pbr_bindings::material.max_relief_mapping_search_steps, + uv_b, + // Flip the direction of Vt to go toward the surface to make the + // parallax mapping algorithm easier to understand and reason + // about. + -Vt, + ); +#else + uv_b = uv; +#endif + } +#endif // VERTEX_TANGENTS + + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { + pbr_input.material.base_color *= pbr_functions::sample_texture( + pbr_bindings::base_color_texture, + pbr_bindings::base_color_sampler, +#ifdef STANDARD_MATERIAL_BASE_COLOR_UV_B + uv_b, +#else + uv, +#endif + bias, + ); + +#ifdef ALPHA_TO_COVERAGE + // Sharpen alpha edges. 
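+    // Rescaling by `fwidth` below makes the alpha value ramp from 0 to 1 over
+    // roughly one pixel around the cutoff, which keeps alpha-to-coverage edges
+    // crisp regardless of texture resolution or viewing distance.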
+ // + // https://bgolus.medium.com/anti-aliased-alpha-test-the-esoteric-alpha-to-coverage-8b177335ae4f + let alpha_mode = pbr_bindings::material.flags & + pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE { + pbr_input.material.base_color.a = (pbr_input.material.base_color.a - + pbr_bindings::material.alpha_cutoff) / + max(fwidth(pbr_input.material.base_color.a), 0.0001) + 0.5; + } +#endif // ALPHA_TO_COVERAGE + + } +#endif // VERTEX_UVS + + pbr_input.material.flags = pbr_bindings::material.flags; + + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + pbr_input.material.reflectance = pbr_bindings::material.reflectance; + pbr_input.material.ior = pbr_bindings::material.ior; + pbr_input.material.attenuation_color = pbr_bindings::material.attenuation_color; + pbr_input.material.attenuation_distance = pbr_bindings::material.attenuation_distance; + pbr_input.material.alpha_cutoff = pbr_bindings::material.alpha_cutoff; + + // emissive + var emissive: vec4 = pbr_bindings::material.emissive; +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { + emissive = vec4(emissive.rgb * pbr_functions::sample_texture( + pbr_bindings::emissive_texture, + pbr_bindings::emissive_sampler, +#ifdef STANDARD_MATERIAL_EMISSIVE_UV_B + uv_b, +#else + uv, +#endif + bias, + ).rgb, emissive.a); + } +#endif + pbr_input.material.emissive = emissive; + + // metallic and perceptual roughness + var metallic: f32 = pbr_bindings::material.metallic; + var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness; + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { + let metallic_roughness = pbr_functions::sample_texture( + pbr_bindings::metallic_roughness_texture, + pbr_bindings::metallic_roughness_sampler, +#ifdef STANDARD_MATERIAL_METALLIC_ROUGHNESS_UV_B + uv_b, +#else + uv, +#endif + bias, + ); + // Sampling from GLTF standard channels for now + metallic *= metallic_roughness.b; + perceptual_roughness *= metallic_roughness.g; + } +#endif + pbr_input.material.metallic = metallic; + pbr_input.material.perceptual_roughness = perceptual_roughness; + + // Clearcoat factor + pbr_input.material.clearcoat = pbr_bindings::material.clearcoat; +#ifdef VERTEX_UVS +#ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_CLEARCOAT_TEXTURE_BIT) != 0u) { + pbr_input.material.clearcoat *= pbr_functions::sample_texture( + pbr_bindings::clearcoat_texture, + pbr_bindings::clearcoat_sampler, +#ifdef STANDARD_MATERIAL_CLEARCOAT_UV_B + uv_b, +#else + uv, +#endif + bias, + ).r; + } +#endif // PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED +#endif // VERTEX_UVS + + // Clearcoat roughness + pbr_input.material.clearcoat_perceptual_roughness = pbr_bindings::material.clearcoat_perceptual_roughness; +#ifdef VERTEX_UVS +#ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_CLEARCOAT_ROUGHNESS_TEXTURE_BIT) != 0u) { + pbr_input.material.clearcoat_perceptual_roughness *= pbr_functions::sample_texture( + pbr_bindings::clearcoat_roughness_texture, + 
pbr_bindings::clearcoat_roughness_sampler, +#ifdef STANDARD_MATERIAL_CLEARCOAT_ROUGHNESS_UV_B + uv_b, +#else + uv, +#endif + bias, + ).g; + } +#endif // PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED +#endif // VERTEX_UVS + + var specular_transmission: f32 = pbr_bindings::material.specular_transmission; +#ifdef VERTEX_UVS +#ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_SPECULAR_TRANSMISSION_TEXTURE_BIT) != 0u) { + specular_transmission *= pbr_functions::sample_texture( + pbr_bindings::specular_transmission_texture, + pbr_bindings::specular_transmission_sampler, +#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION_UV_B + uv_b, +#else + uv, +#endif + bias, + ).r; + } +#endif +#endif + pbr_input.material.specular_transmission = specular_transmission; + + var thickness: f32 = pbr_bindings::material.thickness; +#ifdef VERTEX_UVS +#ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_THICKNESS_TEXTURE_BIT) != 0u) { + thickness *= pbr_functions::sample_texture( + pbr_bindings::thickness_texture, + pbr_bindings::thickness_sampler, +#ifdef STANDARD_MATERIAL_THICKNESS_UV_B + uv_b, +#else + uv, +#endif + bias, + ).g; + } +#endif +#endif + // scale thickness, accounting for non-uniform scaling (e.g. a “squished” mesh) + // TODO: Meshlet support +#ifndef MESHLET_MESH_MATERIAL_PASS + thickness *= length( + (transpose(mesh[in.instance_index].world_from_local) * vec4(pbr_input.N, 0.0)).xyz + ); +#endif + pbr_input.material.thickness = thickness; + + var diffuse_transmission = pbr_bindings::material.diffuse_transmission; +#ifdef VERTEX_UVS +#ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DIFFUSE_TRANSMISSION_TEXTURE_BIT) != 0u) { + diffuse_transmission *= pbr_functions::sample_texture( + pbr_bindings::diffuse_transmission_texture, + pbr_bindings::diffuse_transmission_sampler, +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION_UV_B + uv_b, +#else + uv, +#endif + bias, + ).a; + } +#endif +#endif + pbr_input.material.diffuse_transmission = diffuse_transmission; + + var diffuse_occlusion: vec3 = vec3(1.0); + var specular_occlusion: f32 = 1.0; +#ifdef VERTEX_UVS + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { + diffuse_occlusion *= pbr_functions::sample_texture( + pbr_bindings::occlusion_texture, + pbr_bindings::occlusion_sampler, +#ifdef STANDARD_MATERIAL_OCCLUSION_UV_B + uv_b, +#else + uv, +#endif + bias, + ).r; + } +#endif +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION + let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; + let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); + diffuse_occlusion = min(diffuse_occlusion, ssao_multibounce); + // Use SSAO to estimate the specular occlusion. 
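+    // The roughness-dependent exponent below means rough surfaces fall back to
+    // the raw SSAO term, while smooth surfaces are occluded less aggressively.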
+ // Lagarde and Rousiers 2014, "Moving Frostbite to Physically Based Rendering" + specular_occlusion = saturate(pow(NdotV + ssao, exp2(-16.0 * roughness - 1.0)) - 1.0 + ssao); +#endif + pbr_input.diffuse_occlusion = diffuse_occlusion; + pbr_input.specular_occlusion = specular_occlusion; + + // N (normal vector) +#ifndef LOAD_PREPASS_NORMALS + + pbr_input.N = normalize(pbr_input.world_normal); + pbr_input.clearcoat_N = pbr_input.N; + +#ifdef VERTEX_UVS +#ifdef VERTEX_TANGENTS + + let TBN = pbr_functions::calculate_tbn_mikktspace(pbr_input.world_normal, in.world_tangent); + +#ifdef STANDARD_MATERIAL_NORMAL_MAP + + let Nt = pbr_functions::sample_texture( + pbr_bindings::normal_map_texture, + pbr_bindings::normal_map_sampler, +#ifdef STANDARD_MATERIAL_NORMAL_MAP_UV_B + uv_b, +#else + uv, +#endif + bias, + ).rgb; + + pbr_input.N = pbr_functions::apply_normal_mapping( + pbr_bindings::material.flags, + TBN, + double_sided, + is_front, + Nt, + ); + +#endif // STANDARD_MATERIAL_NORMAL_MAP + +#ifdef STANDARD_MATERIAL_CLEARCOAT + + // Note: `KHR_materials_clearcoat` specifies that, if there's no + // clearcoat normal map, we must set the normal to the mesh's normal, + // and not to the main layer's bumped normal. + +#ifdef STANDARD_MATERIAL_CLEARCOAT_NORMAL_MAP + + let clearcoat_Nt = pbr_functions::sample_texture( + pbr_bindings::clearcoat_normal_texture, + pbr_bindings::clearcoat_normal_sampler, +#ifdef STANDARD_MATERIAL_CLEARCOAT_NORMAL_UV_B + uv_b, +#else + uv, +#endif + bias, + ).rgb; + + pbr_input.clearcoat_N = pbr_functions::apply_normal_mapping( + pbr_bindings::material.flags, + TBN, + double_sided, + is_front, + clearcoat_Nt, + ); + +#endif // STANDARD_MATERIAL_CLEARCOAT_NORMAL_MAP + +#endif // STANDARD_MATERIAL_CLEARCOAT + +#endif // VERTEX_TANGENTS +#endif // VERTEX_UVS + + // Take anisotropy into account. + // + // This code comes from the `KHR_materials_anisotropy` spec: + // +#ifdef PBR_ANISOTROPY_TEXTURE_SUPPORTED +#ifdef VERTEX_TANGENTS +#ifdef STANDARD_MATERIAL_ANISOTROPY + + var anisotropy_strength = pbr_bindings::material.anisotropy_strength; + var anisotropy_direction = pbr_bindings::material.anisotropy_rotation; + + // Adjust based on the anisotropy map if there is one. + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ANISOTROPY_TEXTURE_BIT) != 0u) { + let anisotropy_texel = pbr_functions::sample_texture( + pbr_bindings::anisotropy_texture, + pbr_bindings::anisotropy_sampler, +#ifdef STANDARD_MATERIAL_ANISOTROPY_UV_B + uv_b, +#else // STANDARD_MATERIAL_ANISOTROPY_UV_B + uv, +#endif // STANDARD_MATERIAL_ANISOTROPY_UV_B + bias, + ).rgb; + + let anisotropy_direction_from_texture = normalize(anisotropy_texel.rg * 2.0 - 1.0); + // Rotate by the anisotropy direction. 
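+        // The mat2x2 below is a 2D rotation whose columns are
+        // (direction.x, direction.y) and (-direction.y, direction.x), so this
+        // rotates the texture-supplied direction by the rotation encoded in
+        // `material.anisotropy_rotation`.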
+ anisotropy_direction = + mat2x2(anisotropy_direction.xy, anisotropy_direction.yx * vec2(-1.0, 1.0)) * + anisotropy_direction_from_texture; + anisotropy_strength *= anisotropy_texel.b; + } + + pbr_input.anisotropy_strength = anisotropy_strength; + + let anisotropy_T = normalize(TBN * vec3(anisotropy_direction, 0.0)); + let anisotropy_B = normalize(cross(pbr_input.world_normal, anisotropy_T)); + pbr_input.anisotropy_T = anisotropy_T; + pbr_input.anisotropy_B = anisotropy_B; + +#endif // STANDARD_MATERIAL_ANISOTROPY +#endif // VERTEX_TANGENTS +#endif // PBR_ANISOTROPY_TEXTURE_SUPPORTED + +#endif // LOAD_PREPASS_NORMALS + +// TODO: Meshlet support +#ifdef LIGHTMAP + pbr_input.lightmap_light = lightmap( + in.uv_b, + pbr_bindings::material.lightmap_exposure, + in.instance_index); +#endif + } + + return pbr_input; +} + +``` + +### bevy/crates/bevy_pbr/src/render/forward_io + +```rust +#define_import_path bevy_pbr::forward_io + +struct Vertex { + @builtin(instance_index) instance_index: u32, +#ifdef VERTEX_POSITIONS + @location(0) position: vec3, +#endif +#ifdef VERTEX_NORMALS + @location(1) normal: vec3, +#endif +#ifdef VERTEX_UVS_A + @location(2) uv: vec2, +#endif +#ifdef VERTEX_UVS_B + @location(3) uv_b: vec2, +#endif +#ifdef VERTEX_TANGENTS + @location(4) tangent: vec4, +#endif +#ifdef VERTEX_COLORS + @location(5) color: vec4, +#endif +#ifdef SKINNED + @location(6) joint_indices: vec4, + @location(7) joint_weights: vec4, +#endif +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif +}; + +struct VertexOutput { + // This is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, + @location(0) world_position: vec4, + @location(1) world_normal: vec3, +#ifdef VERTEX_UVS_A + @location(2) uv: vec2, +#endif +#ifdef VERTEX_UVS_B + @location(3) uv_b: vec2, +#endif +#ifdef VERTEX_TANGENTS + @location(4) world_tangent: vec4, +#endif +#ifdef VERTEX_COLORS + @location(5) color: vec4, +#endif +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + @location(6) @interpolate(flat) instance_index: u32, +#endif +#ifdef VISIBILITY_RANGE_DITHER + @location(7) @interpolate(flat) visibility_range_dither: i32, +#endif +} + +struct FragmentOutput { + @location(0) color: vec4, +} + +``` + +### bevy/crates/bevy_pbr/src/render/mesh_view_types + +```rust +#define_import_path bevy_pbr::mesh_view_types + +struct ClusterableObject { + // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3] + // For spot lights: the direction (x,z), spot_scale and spot_offset + light_custom_data: vec4, + color_inverse_square_range: vec4, + position_radius: vec4, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. + flags: u32, + shadow_depth_bias: f32, + shadow_normal_bias: f32, + spot_light_tan_angle: f32, +}; + +const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; +const POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u; + +struct DirectionalCascade { + clip_from_world: mat4x4, + texel_size: f32, + far_bound: f32, +} + +struct DirectionalLight { + cascades: array, + color: vec4, + direction_to_light: vec3, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
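+    // See the `DIRECTIONAL_LIGHT_FLAGS_*` constants defined below this struct.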
+ flags: u32, + shadow_depth_bias: f32, + shadow_normal_bias: f32, + num_cascades: u32, + cascades_overlap_proportion: f32, + depth_texture_base_index: u32, + skip: u32, +}; + +const DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; +const DIRECTIONAL_LIGHT_FLAGS_VOLUMETRIC_BIT: u32 = 2u; + +struct Lights { + // NOTE: this array size must be kept in sync with the constants defined in bevy_pbr/src/render/light.rs + directional_lights: array, + ambient_color: vec4, + // x/y/z dimensions and n_clusters in w + cluster_dimensions: vec4, + // xy are vec2(cluster_dimensions.xy) / vec2(view.width, view.height) + // + // For perspective projections: + // z is cluster_dimensions.z / log(far / near) + // w is cluster_dimensions.z * log(near) / log(far / near) + // + // For orthographic projections: + // NOTE: near and far are +ve but -z is infront of the camera + // z is -near + // w is cluster_dimensions.z / (-far - -near) + cluster_factors: vec4, + n_directional_lights: u32, + spot_light_shadowmap_offset: i32, + environment_map_smallest_specular_mip_level: u32, + environment_map_intensity: f32, +}; + +struct Fog { + base_color: vec4, + directional_light_color: vec4, + // `be` and `bi` are allocated differently depending on the fog mode + // + // For Linear Fog: + // be.x = start, be.y = end + // For Exponential and ExponentialSquared Fog: + // be.x = density + // For Atmospheric Fog: + // be = per-channel extinction density + // bi = per-channel inscattering density + be: vec3, + directional_light_exponent: f32, + bi: vec3, + mode: u32, +} + +// Important: These must be kept in sync with `fog.rs` +const FOG_MODE_OFF: u32 = 0u; +const FOG_MODE_LINEAR: u32 = 1u; +const FOG_MODE_EXPONENTIAL: u32 = 2u; +const FOG_MODE_EXPONENTIAL_SQUARED: u32 = 3u; +const FOG_MODE_ATMOSPHERIC: u32 = 4u; + +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 +struct ClusterableObjects { + data: array, +}; +struct ClusterLightIndexLists { + data: array, +}; +struct ClusterOffsetsAndCounts { + data: array>, +}; +#else +struct ClusterableObjects { + data: array, +}; +struct ClusterLightIndexLists { + // each u32 contains 4 u8 indices into the ClusterableObjects array + data: array, 1024u>, +}; +struct ClusterOffsetsAndCounts { + // each u32 contains a 24-bit index into ClusterLightIndexLists in the high 24 bits + // and an 8-bit count of the number of lights in the low 8 bits + data: array, 1024u>, +}; +#endif + +struct LightProbe { + // This is stored as the transpose in order to save space in this structure. + // It'll be transposed in the `environment_map_light` function. + light_from_world_transposed: mat3x4, + cubemap_index: i32, + intensity: f32, +}; + +struct LightProbes { + // This must match `MAX_VIEW_REFLECTION_PROBES` on the Rust side. + reflection_probes: array, + irradiance_volumes: array, + reflection_probe_count: i32, + irradiance_volume_count: i32, + // The index of the view environment map cubemap binding, or -1 if there's + // no such cubemap. + view_cubemap_index: i32, + // The smallest valid mipmap level for the specular environment cubemap + // associated with the view. + smallest_specular_mip_level_for_view: u32, + // The intensity of the environment map associated with the view. + intensity_for_view: f32, +}; + +// Settings for screen space reflections. +// +// For more information on these settings, see the documentation for +// `bevy_pbr::ssr::ScreenSpaceReflectionsSettings`. 
+struct ScreenSpaceReflectionsSettings { + perceptual_roughness_threshold: f32, + thickness: f32, + linear_steps: u32, + linear_march_exponent: f32, + bisection_steps: u32, + use_secant: u32, +}; + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_ambient + +```rust +#define_import_path bevy_pbr::ambient + +#import bevy_pbr::{ + lighting::{EnvBRDFApprox, F_AB}, + mesh_view_bindings::lights, +} + +// A precomputed `NdotV` is provided because it is computed regardless, +// but `world_normal` and the view vector `V` are provided separately for more advanced uses. +fn ambient_light( + world_position: vec4, + world_normal: vec3, + V: vec3, + NdotV: f32, + diffuse_color: vec3, + specular_color: vec3, + perceptual_roughness: f32, + occlusion: vec3, +) -> vec3 { + let diffuse_ambient = EnvBRDFApprox(diffuse_color, F_AB(1.0, NdotV)); + let specular_ambient = EnvBRDFApprox(specular_color, F_AB(perceptual_roughness, NdotV)); + + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. + // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(specular_color, vec3(50.0 * 0.33))); + + return (diffuse_ambient + specular_ambient * specular_occlusion) * lights.ambient_color.rgb * occlusion; +} + +``` + +### bevy/crates/bevy_pbr/src/render/mesh + +```rust +#import bevy_pbr::{ + mesh_functions, + skinning, + morph::morph, + forward_io::{Vertex, VertexOutput}, + view_transformations::position_world_to_clip, +} + +#ifdef MORPH_TARGETS +fn morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = bevy_pbr::morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = bevy_pbr::morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * morph(vertex.index, bevy_pbr::morph::position_offset, i); +#ifdef VERTEX_NORMALS + vertex.normal += weight * morph(vertex.index, bevy_pbr::morph::normal_offset, i); +#endif +#ifdef VERTEX_TANGENTS + vertex.tangent += vec4(weight * morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); +#endif + } + return vertex; +} +#endif + +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { + var out: VertexOutput; + +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); +#else + var vertex = vertex_no_morph; +#endif + +#ifdef SKINNED + var world_from_local = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); +#else + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 . + var world_from_local = mesh_functions::get_world_from_local(vertex_no_morph.instance_index); +#endif + +#ifdef VERTEX_NORMALS +#ifdef SKINNED + out.world_normal = skinning::skin_normals(world_from_local, vertex.normal); +#else + out.world_normal = mesh_functions::mesh_normal_local_to_world( + vertex.normal, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
+ // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif +#endif + +#ifdef VERTEX_POSITIONS + out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + out.position = position_world_to_clip(out.world_position.xyz); +#endif + +#ifdef VERTEX_UVS_A + out.uv = vertex.uv; +#endif +#ifdef VERTEX_UVS_B + out.uv_b = vertex.uv_b; +#endif + +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_functions::mesh_tangent_local_to_world( + world_from_local, + vertex.tangent, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif + +#ifdef VERTEX_COLORS + out.color = vertex.color; +#endif + +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + out.instance_index = vertex_no_morph.instance_index; +#endif + +#ifdef VISIBILITY_RANGE_DITHER + out.visibility_range_dither = mesh_functions::get_visibility_range_dither_level( + vertex_no_morph.instance_index, world_from_local[3]); +#endif + + return out; +} + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifdef VERTEX_COLORS + return mesh.color; +#else + return vec4(1.0, 0.0, 1.0, 1.0); +#endif +} + +``` + +### bevy/crates/bevy_pbr/src/render/wireframe + +```rust +#import bevy_pbr::forward_io::VertexOutput + +struct WireframeMaterial { + color: vec4, +}; + +@group(2) @binding(0) +var material: WireframeMaterial; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return material.color; +} + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_transmission + +```rust +#define_import_path bevy_pbr::transmission + +#import bevy_pbr::{ + lighting, + prepass_utils, + utils::interleaved_gradient_noise, + utils, + mesh_view_bindings as view_bindings, +}; + +#import bevy_render::maths::PI + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping::approximate_inverse_tone_mapping +#endif + +fn specular_transmissive_light(world_position: vec4, frag_coord: vec3, view_z: f32, N: vec3, V: vec3, F0: vec3, ior: f32, thickness: f32, perceptual_roughness: f32, specular_transmissive_color: vec3, transmitted_environment_light_specular: vec3) -> vec3 { + // Calculate the ratio between refaction indexes. 
Assume air/vacuum for the space outside the mesh + let eta = 1.0 / ior; + + // Calculate incidence vector (opposite to view vector) and its dot product with the mesh normal + let I = -V; + let NdotI = dot(N, I); + + // Calculate refracted direction using Snell's law + let k = 1.0 - eta * eta * (1.0 - NdotI * NdotI); + let T = eta * I - (eta * NdotI + sqrt(k)) * N; + + // Calculate the exit position of the refracted ray, by propagating refacted direction through thickness + let exit_position = world_position.xyz + T * thickness; + + // Transform exit_position into clip space + let clip_exit_position = view_bindings::view.clip_from_world * vec4(exit_position, 1.0); + + // Scale / offset position so that coordinate is in right space for sampling transmissive background texture + let offset_position = (clip_exit_position.xy / clip_exit_position.w) * vec2(0.5, -0.5) + 0.5; + + // Fetch background color + var background_color: vec4; + if perceptual_roughness == 0.0 { + // If the material has zero roughness, we can use a faster approach without the blur + background_color = fetch_transmissive_background_non_rough(offset_position, frag_coord); + } else { + background_color = fetch_transmissive_background(offset_position, frag_coord, view_z, perceptual_roughness); + } + + // Compensate for exposure, since the background color is coming from an already exposure-adjusted texture + background_color = vec4(background_color.rgb / view_bindings::view.exposure, background_color.a); + + // Dot product of the refracted direction with the exit normal (Note: We assume the exit normal is the entry normal but inverted) + let MinusNdotT = dot(-N, T); + + // Calculate 1.0 - fresnel factor (how much light is _NOT_ reflected, i.e. how much is transmitted) + let F = vec3(1.0) - lighting::fresnel(F0, MinusNdotT); + + // Calculate final color by applying fresnel multiplied specular transmissive color to a mix of background color and transmitted specular environment light + return F * specular_transmissive_color * mix(transmitted_environment_light_specular, background_color.rgb, background_color.a); +} + +fn fetch_transmissive_background_non_rough(offset_position: vec2, frag_coord: vec3) -> vec4 { + var background_color = textureSampleLevel( + view_bindings::view_transmission_texture, + view_bindings::view_transmission_sampler, + offset_position, + 0.0 + ); + +#ifdef DEPTH_PREPASS +#ifndef WEBGL2 + // Use depth prepass data to reject values that are in front of the current fragment + if prepass_utils::prepass_depth(vec4(offset_position * view_bindings::view.viewport.zw, 0.0, 0.0), 0u) > frag_coord.z { + background_color.a = 0.0; + } +#endif +#endif + +#ifdef TONEMAP_IN_SHADER + background_color = approximate_inverse_tone_mapping(background_color, view_bindings::view.color_grading); +#endif + + return background_color; +} + +fn fetch_transmissive_background(offset_position: vec2, frag_coord: vec3, view_z: f32, perceptual_roughness: f32) -> vec4 { + // Calculate view aspect ratio, used to scale offset so that it's proportionate + let aspect = view_bindings::view.viewport.z / view_bindings::view.viewport.w; + + // Calculate how “blurry” the transmission should be. + // Blur is more or less eyeballed to look approximately “right”, since the “correct” + // approach would involve projecting many scattered rays and figuring out their individual + // exit positions. IRL, light rays can be scattered when entering/exiting a material (due to + // roughness) or inside the material (due to subsurface scattering). 
Here, we only consider + // the first scenario. + // + // Blur intensity is: + // - proportional to the square of `perceptual_roughness` + // - proportional to the inverse of view z + let blur_intensity = (perceptual_roughness * perceptual_roughness) / view_z; + +#ifdef SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS + let num_taps = #{SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS}; // Controlled by the `Camera3d::screen_space_specular_transmission_quality` property +#else + let num_taps = 8; // Fallback to 8 taps, if not specified +#endif + let num_spirals = i32(ceil(f32(num_taps) / 8.0)); +#ifdef TEMPORAL_JITTER + let random_angle = interleaved_gradient_noise(frag_coord.xy, view_bindings::globals.frame_count); +#else + let random_angle = interleaved_gradient_noise(frag_coord.xy, 0u); +#endif + // Pixel checkerboard pattern (helps make the interleaved gradient noise pattern less visible) + let pixel_checkboard = ( +#ifdef TEMPORAL_JITTER + // 0 or 1 on even/odd pixels, alternates every frame + (i32(frag_coord.x) + i32(frag_coord.y) + i32(view_bindings::globals.frame_count)) % 2 +#else + // 0 or 1 on even/odd pixels + (i32(frag_coord.x) + i32(frag_coord.y)) % 2 +#endif + ); + + var result = vec4(0.0); + for (var i: i32 = 0; i < num_taps; i = i + 1) { + let current_spiral = (i >> 3u); + let angle = (random_angle + f32(current_spiral) / f32(num_spirals)) * 2.0 * PI; + let m = vec2(sin(angle), cos(angle)); + let rotation_matrix = mat2x2( + m.y, -m.x, + m.x, m.y + ); + + // Get spiral offset + var spiral_offset: vec2; + switch i & 7 { + // https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) + // TODO: Figure out a more reasonable way of doing this, as WGSL + // seems to only allow constant indexes into constant arrays at the moment. + // The downstream shader compiler should be able to optimize this into a single + // constant when unrolling the for loop, but it's still not ideal. 
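+            // Illustrative sketch (added note, not part of the original shader): if WGSL
+            // allowed dynamically indexing a module-scope const array here, this whole
+            // switch could collapse to a single line along the lines of
+            //
+            //     // hypothetical array holding SPIRAL_OFFSET_0_, _2_, _4_, _6_, _1_, _3_, _5_, _7_
+            //     spiral_offset = SPIRAL_OFFSETS[i & 7];
+            //
+            // with the array pre-ordered evens-then-odds to match the cases below.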
+ case 0: { spiral_offset = utils::SPIRAL_OFFSET_0_; } // Note: We go even first and then odd, so that the lowest + case 1: { spiral_offset = utils::SPIRAL_OFFSET_2_; } // quality possible (which does 4 taps) still does a full spiral + case 2: { spiral_offset = utils::SPIRAL_OFFSET_4_; } // instead of just the first half of it + case 3: { spiral_offset = utils::SPIRAL_OFFSET_6_; } + case 4: { spiral_offset = utils::SPIRAL_OFFSET_1_; } + case 5: { spiral_offset = utils::SPIRAL_OFFSET_3_; } + case 6: { spiral_offset = utils::SPIRAL_OFFSET_5_; } + case 7: { spiral_offset = utils::SPIRAL_OFFSET_7_; } + default: {} + } + + // Make each consecutive spiral slightly smaller than the previous one + spiral_offset *= 1.0 - (0.5 * f32(current_spiral + 1) / f32(num_spirals)); + + // Rotate and correct for aspect ratio + let rotated_spiral_offset = (rotation_matrix * spiral_offset) * vec2(1.0, aspect); + + // Calculate final offset position, with blur and spiral offset + let modified_offset_position = offset_position + rotated_spiral_offset * blur_intensity * (1.0 - f32(pixel_checkboard) * 0.1); + + // Sample the view transmission texture at the offset position + noise offset, to get the background color + var sample = textureSampleLevel( + view_bindings::view_transmission_texture, + view_bindings::view_transmission_sampler, + modified_offset_position, + 0.0 + ); + +#ifdef DEPTH_PREPASS +#ifndef WEBGL2 + // Use depth prepass data to reject values that are in front of the current fragment + if prepass_utils::prepass_depth(vec4(modified_offset_position * view_bindings::view.viewport.zw, 0.0, 0.0), 0u) > frag_coord.z { + sample = vec4(0.0); + } +#endif +#endif + + // As blur intensity grows higher, gradually limit *very bright* color RGB values towards a + // maximum length of 1.0 to prevent stray “firefly” pixel artifacts. This can potentially make + // very strong emissive meshes appear much dimmer, but the artifacts are noticeable enough to + // warrant this treatment. 
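+        // Worked example of the clamp below (added note with illustrative numbers):
+        // `normalized_rgb` always has length 1.0, so the cap vector has length
+        // 1.0 / saturate(blur_intensity / 2.0):
+        //   blur_intensity = 0.5  -> cap length 1.0 / 0.25 = 4.0 (mild limiting)
+        //   blur_intensity >= 2.0 -> cap length 1.0             (strong limiting)
+        // `min()` then clamps each RGB channel of the sample against that cap.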
+ let normalized_rgb = normalize(sample.rgb); + result += vec4(min(sample.rgb, normalized_rgb / saturate(blur_intensity / 2.0)), sample.a); + } + + result /= f32(num_taps); + +#ifdef TONEMAP_IN_SHADER + result = approximate_inverse_tone_mapping(result, view_bindings::view.color_grading); +#endif + + return result; +} + +``` + +### bevy/crates/bevy_pbr/src/render/rgb9e5 + +```rust +#define_import_path bevy_pbr::rgb9e5 + +const RGB9E5_EXPONENT_BITS = 5u; +const RGB9E5_MANTISSA_BITS = 9; +const RGB9E5_MANTISSA_BITSU = 9u; +const RGB9E5_EXP_BIAS = 15; +const RGB9E5_MAX_VALID_BIASED_EXP = 31u; + +//#define MAX_RGB9E5_EXP (RGB9E5_MAX_VALID_BIASED_EXP - RGB9E5_EXP_BIAS) +//#define RGB9E5_MANTISSA_VALUES (1< i32 { + let f = bitcast(x); + let biasedexponent = (f & 0x7F800000u) >> 23u; + return i32(biasedexponent) - 127; +} + +// https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_shared_exponent.txt +fn vec3_to_rgb9e5_(rgb_in: vec3) -> u32 { + let rgb = clamp(rgb_in, vec3(0.0), vec3(MAX_RGB9E5_)); + + let maxrgb = max(rgb.r, max(rgb.g, rgb.b)); + var exp_shared = max(-RGB9E5_EXP_BIAS - 1, floor_log2_(maxrgb)) + 1 + RGB9E5_EXP_BIAS; + var denom = exp2(f32(exp_shared - RGB9E5_EXP_BIAS - RGB9E5_MANTISSA_BITS)); + + let maxm = i32(floor(maxrgb / denom + 0.5)); + if (maxm == RGB9E5_MANTISSA_VALUES) { + denom *= 2.0; + exp_shared += 1; + } + + let n = vec3(floor(rgb / denom + 0.5)); + + return (u32(exp_shared) << 27u) | (n.b << 18u) | (n.g << 9u) | (n.r << 0u); +} + +// Builtin extractBits() is not working on WEBGL or DX12 +// DX12: HLSL: Unimplemented("write_expr_math ExtractBits") +fn extract_bits(value: u32, offset: u32, bits: u32) -> u32 { + let mask = (1u << bits) - 1u; + return (value >> offset) & mask; +} + +fn rgb9e5_to_vec3_(v: u32) -> vec3 { + let exponent = i32(extract_bits(v, 27u, RGB9E5_EXPONENT_BITS)) - RGB9E5_EXP_BIAS - RGB9E5_MANTISSA_BITS; + let scale = exp2(f32(exponent)); + + return vec3( + f32(extract_bits(v, 0u, RGB9E5_MANTISSA_BITSU)), + f32(extract_bits(v, 9u, RGB9E5_MANTISSA_BITSU)), + f32(extract_bits(v, 18u, RGB9E5_MANTISSA_BITSU)) + ) * scale; +} + +``` + +### bevy/crates/bevy_pbr/src/render/clustered_forward + +```rust +#define_import_path bevy_pbr::clustered_forward + +#import bevy_pbr::{ + mesh_view_bindings as bindings, + utils::rand_f, +} + +#import bevy_render::{ + color_operations::hsv_to_rgb, + maths::PI_2, +} + +// NOTE: Keep in sync with bevy_pbr/src/light.rs +fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { + var z_slice: u32 = 0u; + if is_orthographic { + // NOTE: view_z is correct in the orthographic case + z_slice = u32(floor((view_z - bindings::lights.cluster_factors.z) * bindings::lights.cluster_factors.w)); + } else { + // NOTE: had to use -view_z to make it positive else log(negative) is nan + z_slice = u32(log(-view_z) * bindings::lights.cluster_factors.z - bindings::lights.cluster_factors.w + 1.0); + } + // NOTE: We use min as we may limit the far z plane used for clustering to be closer than + // the furthest thing being drawn. This means that we need to limit to the maximum cluster. 
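+    // Worked check of the perspective branch above (added note), using the
+    // `cluster_factors` packing documented in `mesh_view_types`:
+    //   cluster_factors.z = cluster_dimensions.z / log(far / near)
+    //   cluster_factors.w = cluster_dimensions.z * log(near) / log(far / near)
+    // so at -view_z == near the expression evaluates to exactly 1.0 (slice 0 effectively
+    // covers geometry in front of the near plane), and at -view_z == far it evaluates to
+    // cluster_dimensions.z + 1.0, which is why the clamp below is required.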
+ return min(z_slice, bindings::lights.cluster_dimensions.z - 1u); +} + +fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { + let xy = vec2(floor((frag_coord - bindings::view.viewport.xy) * bindings::lights.cluster_factors.xy)); + let z_slice = view_z_to_z_slice(view_z, is_orthographic); + // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer + // arrays based on the cluster index. + return min( + (xy.y * bindings::lights.cluster_dimensions.x + xy.x) * bindings::lights.cluster_dimensions.z + z_slice, + bindings::lights.cluster_dimensions.w - 1u + ); +} + +// this must match CLUSTER_COUNT_SIZE in light.rs +const CLUSTER_COUNT_SIZE = 9u; +fn unpack_offset_and_counts(cluster_index: u32) -> vec3 { +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 + return bindings::cluster_offsets_and_counts.data[cluster_index].xyz; +#else + let offset_and_counts = bindings::cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; + // [ 31 .. 18 | 17 .. 9 | 8 .. 0 ] + // [ offset | point light count | spot light count ] + return vec3( + (offset_and_counts >> (CLUSTER_COUNT_SIZE * 2u)) & ((1u << (32u - (CLUSTER_COUNT_SIZE * 2u))) - 1u), + (offset_and_counts >> CLUSTER_COUNT_SIZE) & ((1u << CLUSTER_COUNT_SIZE) - 1u), + offset_and_counts & ((1u << CLUSTER_COUNT_SIZE) - 1u), + ); +#endif +} + +fn get_clusterable_object_id(index: u32) -> u32 { +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 + return bindings::clusterable_object_index_lists.data[index]; +#else + // The index is correct but in clusterable_object_index_lists we pack 4 u8s into a u32 + // This means the index into clusterable_object_index_lists is index / 4 + let indices = bindings::clusterable_object_index_lists.data[index >> 4u][(index >> 2u) & + ((1u << 2u) - 1u)]; + // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index + return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); +#endif +} + +fn cluster_debug_visualization( + input_color: vec4, + view_z: f32, + is_orthographic: bool, + offset_and_counts: vec3, + cluster_index: u32, +) -> vec4 { + var output_color = input_color; + + // Cluster allocation debug (using 'over' alpha blending) +#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES + // NOTE: This debug mode visualises the z-slices + let cluster_overlay_alpha = 0.1; + var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); + // A hack to make the colors alternate a bit more + if (z_slice & 1u) == 1u { + z_slice = z_slice + bindings::lights.cluster_dimensions.z / 2u; + } + let slice_color = hsv_to_rgb( + f32(z_slice) / f32(bindings::lights.cluster_dimensions.z + 1u) * PI_2, + 1.0, + 0.5 + ); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COMPLEXITY + // NOTE: This debug mode visualises the number of clusterable objects within + // the cluster that contains the fragment. It shows a sort of cluster + // complexity measure. 
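+    // Descriptive note (added): the overlay below tints the fragment towards red as the
+    // cluster's clusterable-object count (offset_and_counts[1] + offset_and_counts[2],
+    // i.e. point lights + spot lights) approaches max_complexity_per_cluster, and towards
+    // green as the count approaches zero.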
+ let cluster_overlay_alpha = 0.1; + let max_complexity_per_cluster = 64.0; + output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r + cluster_overlay_alpha * + smoothStep( + 0.0, + max_complexity_per_cluster, + f32(offset_and_counts[1] + offset_and_counts[2])); + output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g + cluster_overlay_alpha * + (1.0 - smoothStep( + 0.0, + max_complexity_per_cluster, + f32(offset_and_counts[1] + offset_and_counts[2]))); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COMPLEXITY +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + // NOTE: Visualizes the cluster to which the fragment belongs + let cluster_overlay_alpha = 0.1; + var rng = cluster_index; + let cluster_color = hsv_to_rgb(rand_f(&rng) * PI_2, 1.0, 0.5); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + + return output_color; +} + +``` + +### bevy/crates/bevy_pbr/src/render/mesh_view_bindings + +```rust +#define_import_path bevy_pbr::mesh_view_bindings + +#import bevy_pbr::mesh_view_types as types +#import bevy_render::{ + view::View, + globals::Globals, +} + +@group(0) @binding(0) var view: View; +@group(0) @binding(1) var lights: types::Lights; +#ifdef NO_CUBE_ARRAY_TEXTURES_SUPPORT +@group(0) @binding(2) var point_shadow_textures: texture_depth_cube; +#else +@group(0) @binding(2) var point_shadow_textures: texture_depth_cube_array; +#endif +@group(0) @binding(3) var point_shadow_textures_sampler: sampler_comparison; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d; +#else +@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d_array; +#endif +@group(0) @binding(5) var directional_shadow_textures_sampler: sampler_comparison; + +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 +@group(0) @binding(6) var clusterable_objects: types::ClusterableObjects; +@group(0) @binding(7) var clusterable_object_index_lists: types::ClusterLightIndexLists; +@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; +#else +@group(0) @binding(6) var clusterable_objects: types::ClusterableObjects; +@group(0) @binding(7) var clusterable_object_index_lists: types::ClusterLightIndexLists; +@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; +#endif + +@group(0) @binding(9) var globals: Globals; +@group(0) @binding(10) var fog: types::Fog; +@group(0) @binding(11) var light_probes: types::LightProbes; + +const VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE: u32 = 64u; +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 +@group(0) @binding(12) var visibility_ranges: array>; +#else +@group(0) @binding(12) var visibility_ranges: array, VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE>; +#endif + +@group(0) @binding(13) var ssr_settings: types::ScreenSpaceReflectionsSettings; +@group(0) @binding(14) var screen_space_ambient_occlusion_texture: texture_2d; + +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY +@group(0) @binding(15) var diffuse_environment_maps: binding_array, 8u>; +@group(0) @binding(16) var specular_environment_maps: binding_array, 8u>; +#else +@group(0) @binding(15) var diffuse_environment_map: texture_cube; +@group(0) @binding(16) var specular_environment_map: texture_cube; +#endif +@group(0) @binding(17) var environment_map_sampler: sampler; + +#ifdef IRRADIANCE_VOLUMES_ARE_USABLE +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY +@group(0) @binding(18) var irradiance_volumes: 
binding_array, 8u>; +#else +@group(0) @binding(18) var irradiance_volume: texture_3d; +#endif +@group(0) @binding(19) var irradiance_volume_sampler: sampler; +#endif + +@group(0) @binding(20) var dt_lut_texture: texture_3d; +@group(0) @binding(21) var dt_lut_sampler: sampler; + +#ifdef MULTISAMPLED +#ifdef DEPTH_PREPASS +@group(0) @binding(22) var depth_prepass_texture: texture_depth_multisampled_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS +@group(0) @binding(23) var normal_prepass_texture: texture_multisampled_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(24) var motion_vector_prepass_texture: texture_multisampled_2d; +#endif // MOTION_VECTOR_PREPASS + +#else // MULTISAMPLED + +#ifdef DEPTH_PREPASS +@group(0) @binding(22) var depth_prepass_texture: texture_depth_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS +@group(0) @binding(23) var normal_prepass_texture: texture_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(24) var motion_vector_prepass_texture: texture_2d; +#endif // MOTION_VECTOR_PREPASS + +#endif // MULTISAMPLED + +#ifdef DEFERRED_PREPASS +@group(0) @binding(25) var deferred_prepass_texture: texture_2d; +#endif // DEFERRED_PREPASS + +@group(0) @binding(26) var view_transmission_texture: texture_2d; +@group(0) @binding(27) var view_transmission_sampler: sampler; + +``` + +### bevy/crates/bevy_pbr/src/render/pbr_prepass_functions + +```rust +#define_import_path bevy_pbr::pbr_prepass_functions + +#import bevy_pbr::{ + prepass_io::VertexOutput, + prepass_bindings::previous_view_uniforms, + mesh_view_bindings::view, + pbr_bindings, + pbr_types, +} + +// Cutoff used for the premultiplied alpha modes BLEND, ADD, and ALPHA_TO_COVERAGE. +const PREMULTIPLIED_ALPHA_CUTOFF = 0.05; + +// We can use a simplified version of alpha_discard() here since we only need to handle the alpha_cutoff +fn prepass_alpha_discard(in: VertexOutput) { + +#ifdef MAY_DISCARD + var output_color: vec4 = pbr_bindings::material.base_color; + +#ifdef VERTEX_UVS +#ifdef STANDARD_MATERIAL_BASE_COLOR_UV_B + var uv = in.uv_b; +#else // STANDARD_MATERIAL_BASE_COLOR_UV_B + var uv = in.uv; +#endif // STANDARD_MATERIAL_BASE_COLOR_UV_B + + let uv_transform = pbr_bindings::material.uv_transform; + uv = (uv_transform * vec3(uv, 1.0)).xy; + if (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u { + output_color = output_color * textureSampleBias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); + } +#endif // VERTEX_UVS + + let alpha_mode = pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK { + if output_color.a < pbr_bindings::material.alpha_cutoff { + discard; + } + } else if (alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND || + alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD || + alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE) { + if output_color.a < PREMULTIPLIED_ALPHA_CUTOFF { + discard; + } + } else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED { + if all(output_color < vec4(PREMULTIPLIED_ALPHA_CUTOFF)) { + discard; + } + } + +#endif // MAY_DISCARD +} + +#ifdef MOTION_VECTOR_PREPASS +fn calculate_motion_vector(world_position: vec4, previous_world_position: vec4) -> vec2 { + let clip_position_t = view.unjittered_clip_from_world * world_position; + let 
clip_position = clip_position_t.xy / clip_position_t.w; + let previous_clip_position_t = previous_view_uniforms.clip_from_world * previous_world_position; + let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; + // These motion vectors are used as offsets to UV positions and are stored + // in the range -1,1 to allow offsetting from the one corner to the + // diagonally-opposite corner in UV coordinates, in either direction. + // A difference between diagonally-opposite corners of clip space is in the + // range -2,2, so this needs to be scaled by 0.5. And the V direction goes + // down where clip space y goes up, so y needs to be flipped. + return (clip_position - previous_clip_position) * vec2(0.5, -0.5); +} +#endif // MOTION_VECTOR_PREPASS + +``` + +### bevy/crates/bevy_pbr/src/render/mesh_bindings + +```rust +#define_import_path bevy_pbr::mesh_bindings + +#import bevy_pbr::mesh_types::Mesh + +#ifdef PER_OBJECT_BUFFER_BATCH_SIZE +@group(1) @binding(0) var mesh: array; +#else +@group(1) @binding(0) var mesh: array; +#endif // PER_OBJECT_BUFFER_BATCH_SIZE + +``` + +### bevy/crates/bevy_pbr/src/render/shadows + +```rust +#define_import_path bevy_pbr::shadows + +#import bevy_pbr::{ + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, + shadow_sampling::{SPOT_SHADOW_TEXEL_SIZE, sample_shadow_cubemap, sample_shadow_map} +} + +#import bevy_render::{ + color_operations::hsv_to_rgb, + maths::PI_2 +} + +const flip_z: vec3 = vec3(1.0, 1.0, -1.0); + +fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = &view_bindings::clusterable_objects.data[light_id]; + + // because the shadow maps align with the axes and the frustum planes are at 45 degrees + // we can get the worldspace depth by taking the largest absolute axis + let surface_to_light = (*light).position_radius.xyz - frag_position.xyz; + let surface_to_light_abs = abs(surface_to_light); + let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); + + // The normal bias here is already scaled by the texel size at 1 world unit from the light. + // The texel size increases proportionally with distance from the light so multiplying by + // distance to light scales the normal bias to the texel size at the fragment distance. + let normal_offset = (*light).shadow_normal_bias * distance_to_light * surface_normal.xyz; + let depth_offset = (*light).shadow_depth_bias * normalize(surface_to_light.xyz); + let offset_position = frag_position.xyz + normal_offset + depth_offset; + + // similar largest-absolute-axis trick as above, but now with the offset fragment position + let frag_ls = offset_position.xyz - (*light).position_radius.xyz ; + let abs_position_ls = abs(frag_ls); + let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); + + // NOTE: These simplifications come from multiplying: + // projection * vec4(0, 0, -major_axis_magnitude, 1.0) + // and keeping only the terms that have any impact on the depth. + // Projection-agnostic approach: + let zw = -major_axis_magnitude * (*light).light_custom_data.xy + (*light).light_custom_data.zw; + let depth = zw.x / zw.y; + + // Do the lookup, using HW PCF and comparison. Cubemaps assume a left-handed coordinate space, + // so we have to flip the z-axis when sampling. 
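+    // Derivation note for `depth` above (added, not from the original source):
+    // `light_custom_data` holds P[2][2], P[2][3], P[3][2], P[3][3] of the point light's
+    // projection matrix (column-major indexing), so for
+    //   clip = P * vec4(0.0, 0.0, -major_axis_magnitude, 1.0)
+    // we get clip.z = zw.x and clip.w = zw.y, and depth = clip.z / clip.w as usual.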
+ return sample_shadow_cubemap(frag_ls * flip_z, distance_to_light, depth, light_id); +} + +fn fetch_spot_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = &view_bindings::clusterable_objects.data[light_id]; + + let surface_to_light = (*light).position_radius.xyz - frag_position.xyz; + + // construct the light view matrix + var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); + // reconstruct spot dir from x/z and y-direction flag + spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); + if (((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) { + spot_dir.y = -spot_dir.y; + } + + // view matrix z_axis is the reverse of transform.forward() + let fwd = -spot_dir; + let distance_to_light = dot(fwd, surface_to_light); + let offset_position = + -surface_to_light + + ((*light).shadow_depth_bias * normalize(surface_to_light)) + + (surface_normal.xyz * (*light).shadow_normal_bias) * distance_to_light; + + // the construction of the up and right vectors needs to precisely mirror the code + // in render/light.rs:spot_light_view_matrix + var sign = -1.0; + if (fwd.z >= 0.0) { + sign = 1.0; + } + let a = -1.0 / (fwd.z + sign); + let b = fwd.x * fwd.y * a; + let up_dir = vec3(1.0 + sign * fwd.x * fwd.x * a, sign * b, -sign * fwd.x); + let right_dir = vec3(-b, -sign - fwd.y * fwd.y * a, fwd.y); + let light_inv_rot = mat3x3(right_dir, up_dir, fwd); + + // because the matrix is a pure rotation matrix, the inverse is just the transpose, and to calculate + // the product of the transpose with a vector we can just post-multiply instead of pre-multiplying. + // this allows us to keep the matrix construction code identical between CPU and GPU. + let projected_position = offset_position * light_inv_rot; + + // divide xy by perspective matrix "f" and by -projected.z (projected.z is -projection matrix's w) + // to get ndc coordinates + let f_div_minus_z = 1.0 / ((*light).spot_light_tan_angle * -projected_position.z); + let shadow_xy_ndc = projected_position.xy * f_div_minus_z; + // convert to uv coordinates + let shadow_uv = shadow_xy_ndc * vec2(0.5, -0.5) + vec2(0.5, 0.5); + + // 0.1 must match POINT_LIGHT_NEAR_Z + let depth = 0.1 / -projected_position.z; + + return sample_shadow_map( + shadow_uv, + depth, + i32(light_id) + view_bindings::lights.spot_light_shadowmap_offset, + SPOT_SHADOW_TEXEL_SIZE + ); +} + +fn get_cascade_index(light_id: u32, view_z: f32) -> u32 { + let light = &view_bindings::lights.directional_lights[light_id]; + + for (var i: u32 = 0u; i < (*light).num_cascades; i = i + 1u) { + if (-view_z < (*light).cascades[i].far_bound) { + return i; + } + } + return (*light).num_cascades; +} + +// Converts from world space to the uv position in the light's shadow map. +// +// The depth is stored in the return value's z coordinate. If the return value's +// w coordinate is 0.0, then we landed outside the shadow map entirely. 
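+//
+// Example of the Y-flip below (added, illustrative): NDC (-1.0, -1.0) maps to
+// uv (0.0, 1.0) and NDC (1.0, 1.0) maps to uv (1.0, 0.0), because texture v grows
+// downwards while NDC y grows upwards.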
+fn world_to_directional_light_local( + light_id: u32, + cascade_index: u32, + offset_position: vec4 +) -> vec4 { + let light = &view_bindings::lights.directional_lights[light_id]; + let cascade = &(*light).cascades[cascade_index]; + + let offset_position_clip = (*cascade).clip_from_world * offset_position; + if (offset_position_clip.w <= 0.0) { + return vec4(0.0); + } + let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; + // No shadow outside the orthographic projection volume + if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 + || any(offset_position_ndc > vec3(1.0))) { + return vec4(0.0); + } + + // compute texture coordinates for shadow lookup, compensating for the Y-flip difference + // between the NDC and texture coordinates + let flip_correction = vec2(0.5, -0.5); + let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); + + let depth = offset_position_ndc.z; + + return vec4(light_local, depth, 1.0); +} + +fn sample_directional_cascade(light_id: u32, cascade_index: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = &view_bindings::lights.directional_lights[light_id]; + let cascade = &(*light).cascades[cascade_index]; + + // The normal bias is scaled to the texel size. + let normal_offset = (*light).shadow_normal_bias * (*cascade).texel_size * surface_normal.xyz; + let depth_offset = (*light).shadow_depth_bias * (*light).direction_to_light.xyz; + let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); + + let light_local = world_to_directional_light_local(light_id, cascade_index, offset_position); + if (light_local.w == 0.0) { + return 1.0; + } + + let array_index = i32((*light).depth_texture_base_index + cascade_index); + return sample_shadow_map(light_local.xy, light_local.z, array_index, (*cascade).texel_size); +} + +fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3, view_z: f32) -> f32 { + let light = &view_bindings::lights.directional_lights[light_id]; + let cascade_index = get_cascade_index(light_id, view_z); + + if (cascade_index >= (*light).num_cascades) { + return 1.0; + } + + var shadow = sample_directional_cascade(light_id, cascade_index, frag_position, surface_normal); + + // Blend with the next cascade, if there is one. 
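+    // Worked example (added, illustrative numbers): with cascades_overlap_proportion = 0.2
+    // and this cascade's far_bound = 100.0, next_near_bound = 0.8 * 100.0 = 80.0, so
+    // fragments with -view_z in [80.0, 100.0] blend linearly from this cascade's shadow
+    // to the next cascade's, reaching 100% of the next cascade at the far bound.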
+ let next_cascade_index = cascade_index + 1u; + if (next_cascade_index < (*light).num_cascades) { + let this_far_bound = (*light).cascades[cascade_index].far_bound; + let next_near_bound = (1.0 - (*light).cascades_overlap_proportion) * this_far_bound; + if (-view_z >= next_near_bound) { + let next_shadow = sample_directional_cascade(light_id, next_cascade_index, frag_position, surface_normal); + shadow = mix(shadow, next_shadow, (-view_z - next_near_bound) / (this_far_bound - next_near_bound)); + } + } + return shadow; +} + +fn cascade_debug_visualization( + output_color: vec3, + light_id: u32, + view_z: f32, +) -> vec3 { + let overlay_alpha = 0.95; + let cascade_index = get_cascade_index(light_id, view_z); + let cascade_color = hsv_to_rgb( + f32(cascade_index) / f32(#{MAX_CASCADES_PER_LIGHT}u + 1u) * PI_2, + 1.0, + 0.5 + ); + return vec3( + (1.0 - overlay_alpha) * output_color.rgb + overlay_alpha * cascade_color + ); +} + +``` + +### bevy/crates/bevy_pbr/src/meshlet/cull_clusters + +```rust +#import bevy_pbr::meshlet_bindings::{ + meshlet_cluster_meshlet_ids, + meshlet_bounding_spheres, + meshlet_cluster_instance_ids, + meshlet_instance_uniforms, + meshlet_second_pass_candidates, + depth_pyramid, + view, + previous_view, + should_cull_instance, + cluster_is_second_pass_candidate, + meshlets, + draw_indirect_args, + draw_triangle_buffer, +} +#import bevy_render::maths::affine3_to_square + +/// Culls individual clusters (1 per thread) in two passes (two pass occlusion culling), and outputs a bitmask of which clusters survived. +/// 1. The first pass tests instance visibility, frustum culling, LOD selection, and finally occlusion culling using last frame's depth pyramid. +/// 2. The second pass performs occlusion culling (using the depth buffer generated from the first pass) on all clusters that passed +/// the instance, frustum, and LOD tests in the first pass, but were not visible last frame according to the occlusion culling. 
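+// How the 1D cluster ID is derived from the 3D dispatch (added explanatory note): the
+// workgroup grid is linearized as
+//
+//     workgroup_index = workgroup_id.x * num_workgroups.x * num_workgroups.x
+//                     + workgroup_id.y * num_workgroups.x
+//                     + workgroup_id.z
+//     cluster_id      = local_invocation_id.x + 128u * workgroup_index
+//
+// which is what the `dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x,
+// num_workgroups.x, 1u))` expression below computes, assuming the dispatch's y and z
+// dimensions never exceed its x dimension.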
+ +@compute +@workgroup_size(128, 1, 1) // 128 threads per workgroup, 1 cluster per thread +fn cull_clusters( + @builtin(workgroup_id) workgroup_id: vec3, + @builtin(num_workgroups) num_workgroups: vec3, + @builtin(local_invocation_id) local_invocation_id: vec3, +) { + // Calculate the cluster ID for this thread + let cluster_id = local_invocation_id.x + 128u * dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x, num_workgroups.x, 1u)); + if cluster_id >= arrayLength(&meshlet_cluster_meshlet_ids) { return; } + +#ifdef MESHLET_SECOND_CULLING_PASS + if !cluster_is_second_pass_candidate(cluster_id) { return; } +#endif + + // Check for instance culling + let instance_id = meshlet_cluster_instance_ids[cluster_id]; +#ifdef MESHLET_FIRST_CULLING_PASS + if should_cull_instance(instance_id) { return; } +#endif + + // Calculate world-space culling bounding sphere for the cluster + let instance_uniform = meshlet_instance_uniforms[instance_id]; + let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; + let world_from_local = affine3_to_square(instance_uniform.world_from_local); + let world_scale = max(length(world_from_local[0]), max(length(world_from_local[1]), length(world_from_local[2]))); + let bounding_spheres = meshlet_bounding_spheres[meshlet_id]; + var culling_bounding_sphere_center = world_from_local * vec4(bounding_spheres.self_culling.center, 1.0); + var culling_bounding_sphere_radius = world_scale * bounding_spheres.self_culling.radius; + +#ifdef MESHLET_FIRST_CULLING_PASS + // Frustum culling + // TODO: Faster method from https://vkguide.dev/docs/gpudriven/compute_culling/#frustum-culling-function + for (var i = 0u; i < 6u; i++) { + if dot(view.frustum[i], culling_bounding_sphere_center) + culling_bounding_sphere_radius <= 0.0 { + return; + } + } + + // Calculate view-space LOD bounding sphere for the meshlet + let lod_bounding_sphere_center = world_from_local * vec4(bounding_spheres.self_lod.center, 1.0); + let lod_bounding_sphere_radius = world_scale * bounding_spheres.self_lod.radius; + let lod_bounding_sphere_center_view_space = (view.view_from_world * vec4(lod_bounding_sphere_center.xyz, 1.0)).xyz; + + // Calculate view-space LOD bounding sphere for the meshlet's parent + let parent_lod_bounding_sphere_center = world_from_local * vec4(bounding_spheres.parent_lod.center, 1.0); + let parent_lod_bounding_sphere_radius = world_scale * bounding_spheres.parent_lod.radius; + let parent_lod_bounding_sphere_center_view_space = (view.view_from_world * vec4(parent_lod_bounding_sphere_center.xyz, 1.0)).xyz; + + // Check LOD cut (meshlet error imperceptible, and parent error not imperceptible) + let lod_is_ok = lod_error_is_imperceptible(lod_bounding_sphere_center_view_space, lod_bounding_sphere_radius); + let parent_lod_is_ok = lod_error_is_imperceptible(parent_lod_bounding_sphere_center_view_space, parent_lod_bounding_sphere_radius); + if !lod_is_ok || parent_lod_is_ok { return; } +#endif + + // Project the culling bounding sphere to view-space for occlusion culling +#ifdef MESHLET_FIRST_CULLING_PASS + let previous_world_from_local = affine3_to_square(instance_uniform.previous_world_from_local); + let previous_world_from_local_scale = max(length(previous_world_from_local[0]), max(length(previous_world_from_local[1]), length(previous_world_from_local[2]))); + culling_bounding_sphere_center = previous_world_from_local * vec4(bounding_spheres.self_culling.center, 1.0); + culling_bounding_sphere_radius = previous_world_from_local_scale * bounding_spheres.self_culling.radius; +#endif + let 
culling_bounding_sphere_center_view_space = (view.view_from_world * vec4(culling_bounding_sphere_center.xyz, 1.0)).xyz; + + let aabb = project_view_space_sphere_to_screen_space_aabb(culling_bounding_sphere_center_view_space, culling_bounding_sphere_radius); + let depth_pyramid_size_mip_0 = vec2(textureDimensions(depth_pyramid, 0)); + let width = (aabb.z - aabb.x) * depth_pyramid_size_mip_0.x; + let height = (aabb.w - aabb.y) * depth_pyramid_size_mip_0.y; + let depth_level = max(0, i32(ceil(log2(max(width, height))))); // TODO: Naga doesn't like this being a u32 + let depth_pyramid_size = vec2(textureDimensions(depth_pyramid, depth_level)); + let aabb_top_left = vec2(aabb.xy * depth_pyramid_size); + + let depth_quad_a = textureLoad(depth_pyramid, aabb_top_left, depth_level).x; + let depth_quad_b = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 0u), depth_level).x; + let depth_quad_c = textureLoad(depth_pyramid, aabb_top_left + vec2(0u, 1u), depth_level).x; + let depth_quad_d = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 1u), depth_level).x; + let occluder_depth = min(min(depth_quad_a, depth_quad_b), min(depth_quad_c, depth_quad_d)); + + // Check whether or not the cluster would be occluded if drawn + var cluster_visible: bool; + if view.clip_from_view[3][3] == 1.0 { + // Orthographic + let sphere_depth = view.clip_from_view[3][2] + (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius) * view.clip_from_view[2][2]; + cluster_visible = sphere_depth >= occluder_depth; + } else { + // Perspective + let sphere_depth = -view.clip_from_view[3][2] / (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius); + cluster_visible = sphere_depth >= occluder_depth; + } + + // Write if the cluster should be occlusion tested in the second pass +#ifdef MESHLET_FIRST_CULLING_PASS + if !cluster_visible { + let bit = 1u << cluster_id % 32u; + atomicOr(&meshlet_second_pass_candidates[cluster_id / 32u], bit); + } +#endif + + // Append a list of this cluster's triangles to draw if not culled + if cluster_visible { + let meshlet_triangle_count = meshlets[meshlet_id].triangle_count; + let buffer_start = atomicAdd(&draw_indirect_args.vertex_count, meshlet_triangle_count * 3u) / 3u; + let cluster_id_packed = cluster_id << 6u; + for (var triangle_id = 0u; triangle_id < meshlet_triangle_count; triangle_id++) { + draw_triangle_buffer[buffer_start + triangle_id] = cluster_id_packed | triangle_id; + } + } +} + +// https://stackoverflow.com/questions/21648630/radius-of-projected-sphere-in-screen-space/21649403#21649403 +fn lod_error_is_imperceptible(cp: vec3, r: f32) -> bool { + let d2 = dot(cp, cp); + let r2 = r * r; + let sphere_diameter_uv = view.clip_from_view[0][0] * r / sqrt(d2 - r2); + let view_size = f32(max(view.viewport.z, view.viewport.w)); + let sphere_diameter_pixels = sphere_diameter_uv * view_size; + return sphere_diameter_pixels < 1.0; +} + +// https://zeux.io/2023/01/12/approximate-projected-bounds +fn project_view_space_sphere_to_screen_space_aabb(cp: vec3, r: f32) -> vec4 { + let inv_width = view.clip_from_view[0][0] * 0.5; + let inv_height = view.clip_from_view[1][1] * 0.5; + if view.clip_from_view[3][3] == 1.0 { + // Orthographic + let min_x = cp.x - r; + let max_x = cp.x + r; + + let min_y = cp.y - r; + let max_y = cp.y + r; + + return vec4(min_x * inv_width, 1.0 - max_y * inv_height, max_x * inv_width, 1.0 - min_y * inv_height); + } else { + // Perspective + let c = vec3(cp.xy, -cp.z); + let cr = c * r; + let czr2 = c.z * c.z - r * r; + + let 
vx = sqrt(c.x * c.x + czr2); + let min_x = (vx * c.x - cr.z) / (vx * c.z + cr.x); + let max_x = (vx * c.x + cr.z) / (vx * c.z - cr.x); + + let vy = sqrt(c.y * c.y + czr2); + let min_y = (vy * c.y - cr.z) / (vy * c.z + cr.y); + let max_y = (vy * c.y + cr.z) / (vy * c.z - cr.y); + + return vec4(min_x * inv_width, -max_y * inv_height, max_x * inv_width, -min_y * inv_height) + vec4(0.5); + } +} + +``` + +### bevy/crates/bevy_pbr/src/meshlet/fill_cluster_buffers + +```rust +#import bevy_pbr::meshlet_bindings::{ + cluster_count, + meshlet_instance_meshlet_counts_prefix_sum, + meshlet_instance_meshlet_slice_starts, + meshlet_cluster_instance_ids, + meshlet_cluster_meshlet_ids, +} + +/// Writes out instance_id and meshlet_id to the global buffers for each cluster in the scene. + +@compute +@workgroup_size(128, 1, 1) // 128 threads per workgroup, 1 cluster per thread +fn fill_cluster_buffers( + @builtin(workgroup_id) workgroup_id: vec3, + @builtin(num_workgroups) num_workgroups: vec3, + @builtin(local_invocation_id) local_invocation_id: vec3 +) { + // Calculate the cluster ID for this thread + let cluster_id = local_invocation_id.x + 128u * dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x, num_workgroups.x, 1u)); + if cluster_id >= cluster_count { return; } + + // Binary search to find the instance this cluster belongs to + var left = 0u; + var right = arrayLength(&meshlet_instance_meshlet_counts_prefix_sum) - 1u; + while left <= right { + let mid = (left + right) / 2u; + if meshlet_instance_meshlet_counts_prefix_sum[mid] <= cluster_id { + left = mid + 1u; + } else { + right = mid - 1u; + } + } + let instance_id = right; + + // Find the meshlet ID for this cluster within the instance's MeshletMesh + let meshlet_id_local = cluster_id - meshlet_instance_meshlet_counts_prefix_sum[instance_id]; + + // Find the overall meshlet ID in the global meshlet buffer + let meshlet_id = meshlet_id_local + meshlet_instance_meshlet_slice_starts[instance_id]; + + // Write results to buffers + meshlet_cluster_instance_ids[cluster_id] = instance_id; + meshlet_cluster_meshlet_ids[cluster_id] = meshlet_id; +} + +``` + +### bevy/crates/bevy_pbr/src/meshlet/visibility_buffer_raster + +```rust +#import bevy_pbr::{ + meshlet_bindings::{ + meshlet_cluster_meshlet_ids, + meshlets, + meshlet_vertex_ids, + meshlet_vertex_data, + meshlet_cluster_instance_ids, + meshlet_instance_uniforms, + meshlet_instance_material_ids, + draw_triangle_buffer, + view, + get_meshlet_index, + unpack_meshlet_vertex, + }, + mesh_functions::mesh_position_local_to_world, +} +#import bevy_render::maths::affine3_to_square + +/// Vertex/fragment shader for rasterizing meshlets into a visibility buffer. 
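+///
+/// Layout note (added, not from the original source): each `draw_triangle_buffer` entry
+/// written by the culling pass packs `(cluster_id << 6u) | triangle_id`, i.e. the low
+/// 6 bits select one of up to 64 triangles within the meshlet and the remaining high bits
+/// identify the cluster. The vertex shader below unpacks it with `packed_ids >> 6u` and
+/// `extractBits(packed_ids, 0u, 6u)`, and forwards the same packed value to the
+/// visibility buffer output so the material pass can resolve it later.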
+ +struct VertexOutput { + @builtin(position) clip_position: vec4, +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT + @location(0) @interpolate(flat) visibility: u32, + @location(1) @interpolate(flat) material_depth: u32, +#endif +#ifdef DEPTH_CLAMP_ORTHO + @location(0) unclamped_clip_depth: f32, +#endif +} + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +struct FragmentOutput { + @location(0) visibility: vec4, + @location(1) material_depth: vec4, +} +#endif + +@vertex +fn vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { + let packed_ids = draw_triangle_buffer[vertex_index / 3u]; + let cluster_id = packed_ids >> 6u; + let triangle_id = extractBits(packed_ids, 0u, 6u); + let index_id = (triangle_id * 3u) + (vertex_index % 3u); + let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; + let meshlet = meshlets[meshlet_id]; + let index = get_meshlet_index(meshlet.start_index_id + index_id); + let vertex_id = meshlet_vertex_ids[meshlet.start_vertex_id + index]; + let vertex = unpack_meshlet_vertex(meshlet_vertex_data[vertex_id]); + let instance_id = meshlet_cluster_instance_ids[cluster_id]; + let instance_uniform = meshlet_instance_uniforms[instance_id]; + + let world_from_local = affine3_to_square(instance_uniform.world_from_local); + let world_position = mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + var clip_position = view.clip_from_world * vec4(world_position.xyz, 1.0); +#ifdef DEPTH_CLAMP_ORTHO + let unclamped_clip_depth = clip_position.z; + clip_position.z = min(clip_position.z, 1.0); +#endif + + return VertexOutput( + clip_position, +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT + packed_ids, + meshlet_instance_material_ids[instance_id], +#endif +#ifdef DEPTH_CLAMP_ORTHO + unclamped_clip_depth, +#endif + ); +} + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +@fragment +fn fragment(vertex_output: VertexOutput) -> FragmentOutput { + return FragmentOutput( + vec4(vertex_output.visibility, 0u, 0u, 0u), + vec4(vertex_output.material_depth, 0u, 0u, 0u), + ); +} +#endif + +#ifdef DEPTH_CLAMP_ORTHO +@fragment +fn fragment(vertex_output: VertexOutput) -> @builtin(frag_depth) f32 { + return vertex_output.unclamped_clip_depth; +} +#endif + +``` + +### bevy/crates/bevy_pbr/src/meshlet/meshlet_mesh_material + +```rust +#import bevy_pbr::{ + meshlet_visibility_buffer_resolve::resolve_vertex_output, + view_transformations::uv_to_ndc, + prepass_io, + pbr_prepass_functions, + utils::rand_f, +} + +@vertex +fn vertex(@builtin(vertex_index) vertex_input: u32) -> @builtin(position) vec4 { + let vertex_index = vertex_input % 3u; + let material_id = vertex_input / 3u; + let material_depth = f32(material_id) / 65535.0; + let uv = vec2(vec2(vertex_index >> 1u, vertex_index & 1u)) * 2.0; + return vec4(uv_to_ndc(uv), material_depth, 1.0); +} + +@fragment +fn fragment(@builtin(position) frag_coord: vec4) -> @location(0) vec4 { + let vertex_output = resolve_vertex_output(frag_coord); + var rng = vertex_output.cluster_id; + let color = vec3(rand_f(&rng), rand_f(&rng), rand_f(&rng)); + return vec4(color, 1.0); +} + +#ifdef PREPASS_FRAGMENT +@fragment +fn prepass_fragment(@builtin(position) frag_coord: vec4) -> prepass_io::FragmentOutput { + let vertex_output = resolve_vertex_output(frag_coord); + + var out: prepass_io::FragmentOutput; + +#ifdef NORMAL_PREPASS + out.normal = vec4(vertex_output.world_normal * 0.5 + vec3(0.5), 1.0); +#endif + +#ifdef MOTION_VECTOR_PREPASS + out.motion_vector = vertex_output.motion_vector; +#endif + +#ifdef 
DEFERRED_PREPASS + // There isn't any material info available for this default prepass shader so we are just writing  + // emissive magenta out to the deferred gbuffer to be rendered by the first deferred lighting pass layer. + // This is here so if the default prepass fragment is used for deferred magenta will be rendered, and also + // as an example to show that a user could write to the deferred gbuffer if they were to start from this shader. + out.deferred = vec4(0u, bevy_pbr::rgb9e5::vec3_to_rgb9e5_(vec3(1.0, 0.0, 1.0)), 0u, 0u); + out.deferred_lighting_pass_id = 1u; +#endif + + return out; +} +#endif + +``` + +### bevy/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve + +```rust +#define_import_path bevy_pbr::meshlet_visibility_buffer_resolve + +#import bevy_pbr::{ + meshlet_bindings::{ + meshlet_visibility_buffer, + meshlet_cluster_meshlet_ids, + meshlets, + meshlet_vertex_ids, + meshlet_vertex_data, + meshlet_cluster_instance_ids, + meshlet_instance_uniforms, + get_meshlet_index, + unpack_meshlet_vertex, + }, + mesh_view_bindings::view, + mesh_functions::{mesh_position_local_to_world, sign_determinant_model_3x3m}, + mesh_types::{Mesh, MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT}, + view_transformations::{position_world_to_clip, frag_coord_to_ndc}, +} +#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} + +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS +#import bevy_pbr::{ + prepass_bindings::previous_view_uniforms, + pbr_prepass_functions::calculate_motion_vector, +} +#endif +#endif + +/// Functions to be used by materials for reading from a meshlet visibility buffer texture. + +#ifdef MESHLET_MESH_MATERIAL_PASS +struct PartialDerivatives { + barycentrics: vec3, + ddx: vec3, + ddy: vec3, +} + +// https://github.com/ConfettiFX/The-Forge/blob/2d453f376ef278f66f97cbaf36c0d12e4361e275/Examples_3/Visibility_Buffer/src/Shaders/FSL/visibilityBuffer_shade.frag.fsl#L83-L139 +fn compute_partial_derivatives(vertex_clip_positions: array, 3>, ndc_uv: vec2, screen_size: vec2) -> PartialDerivatives { + var result: PartialDerivatives; + + let inv_w = 1.0 / vec3(vertex_clip_positions[0].w, vertex_clip_positions[1].w, vertex_clip_positions[2].w); + let ndc_0 = vertex_clip_positions[0].xy * inv_w[0]; + let ndc_1 = vertex_clip_positions[1].xy * inv_w[1]; + let ndc_2 = vertex_clip_positions[2].xy * inv_w[2]; + + let inv_det = 1.0 / determinant(mat2x2(ndc_2 - ndc_1, ndc_0 - ndc_1)); + result.ddx = vec3(ndc_1.y - ndc_2.y, ndc_2.y - ndc_0.y, ndc_0.y - ndc_1.y) * inv_det * inv_w; + result.ddy = vec3(ndc_2.x - ndc_1.x, ndc_0.x - ndc_2.x, ndc_1.x - ndc_0.x) * inv_det * inv_w; + + var ddx_sum = dot(result.ddx, vec3(1.0)); + var ddy_sum = dot(result.ddy, vec3(1.0)); + + let delta_v = ndc_uv - ndc_0; + let interp_inv_w = inv_w.x + delta_v.x * ddx_sum + delta_v.y * ddy_sum; + let interp_w = 1.0 / interp_inv_w; + + result.barycentrics = vec3( + interp_w * (delta_v.x * result.ddx.x + delta_v.y * result.ddy.x + inv_w.x), + interp_w * (delta_v.x * result.ddx.y + delta_v.y * result.ddy.y), + interp_w * (delta_v.x * result.ddx.z + delta_v.y * result.ddy.z), + ); + + result.ddx *= 2.0 / screen_size.x; + result.ddy *= 2.0 / screen_size.y; + ddx_sum *= 2.0 / screen_size.x; + ddy_sum *= 2.0 / screen_size.y; + + let interp_ddx_w = 1.0 / (interp_inv_w + ddx_sum); + let interp_ddy_w = 1.0 / (interp_inv_w + ddy_sum); + + result.ddx = interp_ddx_w * (result.barycentrics * interp_inv_w + result.ddx) - result.barycentrics; + result.ddy = interp_ddy_w * (result.barycentrics * interp_inv_w + 
result.ddy) - result.barycentrics; + return result; +} + +struct VertexOutput { + position: vec4, + world_position: vec4, + world_normal: vec3, + uv: vec2, + ddx_uv: vec2, + ddy_uv: vec2, + world_tangent: vec4, + mesh_flags: u32, + cluster_id: u32, +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS + motion_vector: vec2, +#endif +#endif +} + +/// Load the visibility buffer texture and resolve it into a VertexOutput. +fn resolve_vertex_output(frag_coord: vec4) -> VertexOutput { + let packed_ids = textureLoad(meshlet_visibility_buffer, vec2(frag_coord.xy), 0).r; + let cluster_id = packed_ids >> 6u; + let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; + let meshlet = meshlets[meshlet_id]; + + let triangle_id = extractBits(packed_ids, 0u, 6u); + let index_ids = meshlet.start_index_id + vec3(triangle_id * 3u) + vec3(0u, 1u, 2u); + let indices = meshlet.start_vertex_id + vec3(get_meshlet_index(index_ids.x), get_meshlet_index(index_ids.y), get_meshlet_index(index_ids.z)); + let vertex_ids = vec3(meshlet_vertex_ids[indices.x], meshlet_vertex_ids[indices.y], meshlet_vertex_ids[indices.z]); + let vertex_1 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.x]); + let vertex_2 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.y]); + let vertex_3 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.z]); + + let instance_id = meshlet_cluster_instance_ids[cluster_id]; + var instance_uniform = meshlet_instance_uniforms[instance_id]; + + let world_from_local = affine3_to_square(instance_uniform.world_from_local); + let world_position_1 = mesh_position_local_to_world(world_from_local, vec4(vertex_1.position, 1.0)); + let world_position_2 = mesh_position_local_to_world(world_from_local, vec4(vertex_2.position, 1.0)); + let world_position_3 = mesh_position_local_to_world(world_from_local, vec4(vertex_3.position, 1.0)); + + let clip_position_1 = position_world_to_clip(world_position_1.xyz); + let clip_position_2 = position_world_to_clip(world_position_2.xyz); + let clip_position_3 = position_world_to_clip(world_position_3.xyz); + let frag_coord_ndc = frag_coord_to_ndc(frag_coord).xy; + let partial_derivatives = compute_partial_derivatives( + array(clip_position_1, clip_position_2, clip_position_3), + frag_coord_ndc, + view.viewport.zw, + ); + + let world_position = mat3x4(world_position_1, world_position_2, world_position_3) * partial_derivatives.barycentrics; + let world_normal = mat3x3( + normal_local_to_world(vertex_1.normal, &instance_uniform), + normal_local_to_world(vertex_2.normal, &instance_uniform), + normal_local_to_world(vertex_3.normal, &instance_uniform), + ) * partial_derivatives.barycentrics; + let uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.barycentrics; + let ddx_uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.ddx; + let ddy_uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.ddy; + let world_tangent = mat3x4( + tangent_local_to_world(vertex_1.tangent, world_from_local, instance_uniform.flags), + tangent_local_to_world(vertex_2.tangent, world_from_local, instance_uniform.flags), + tangent_local_to_world(vertex_3.tangent, world_from_local, instance_uniform.flags), + ) * partial_derivatives.barycentrics; + +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS + let previous_world_from_local = affine3_to_square(instance_uniform.previous_world_from_local); + let previous_world_position_1 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_1.position, 1.0)); + let 
previous_world_position_2 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_2.position, 1.0)); + let previous_world_position_3 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_3.position, 1.0)); + let previous_world_position = mat3x4(previous_world_position_1, previous_world_position_2, previous_world_position_3) * partial_derivatives.barycentrics; + let motion_vector = calculate_motion_vector(world_position, previous_world_position); +#endif +#endif + + return VertexOutput( + frag_coord, + world_position, + world_normal, + uv, + ddx_uv, + ddy_uv, + world_tangent, + instance_uniform.flags, + cluster_id, +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS + motion_vector, +#endif +#endif + ); +} + +fn normal_local_to_world(vertex_normal: vec3, instance_uniform: ptr) -> vec3 { + if any(vertex_normal != vec3(0.0)) { + return normalize( + mat2x4_f32_to_mat3x3_unpack( + (*instance_uniform).local_from_world_transpose_a, + (*instance_uniform).local_from_world_transpose_b, + ) * vertex_normal + ); + } else { + return vertex_normal; + } +} + +fn tangent_local_to_world(vertex_tangent: vec4, world_from_local: mat4x4, mesh_flags: u32) -> vec4 { + if any(vertex_tangent != vec4(0.0)) { + return vec4( + normalize( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz, + ) * vertex_tangent.xyz + ), + vertex_tangent.w * sign_determinant_model_3x3m(mesh_flags) + ); + } else { + return vertex_tangent; + } +} +#endif + +``` + +### bevy/crates/bevy_pbr/src/meshlet/meshlet_bindings + +```rust +#define_import_path bevy_pbr::meshlet_bindings + +#import bevy_pbr::mesh_types::Mesh +#import bevy_render::view::View +#import bevy_pbr::prepass_bindings::PreviousViewUniforms + +struct PackedMeshletVertex { + a: vec4, + b: vec4, + tangent: vec4, +} + +// TODO: Octahedral encode normal, remove tangent and derive from UV derivatives +struct MeshletVertex { + position: vec3, + normal: vec3, + uv: vec2, + tangent: vec4, +} + +fn unpack_meshlet_vertex(packed: PackedMeshletVertex) -> MeshletVertex { + var vertex: MeshletVertex; + vertex.position = packed.a.xyz; + vertex.normal = vec3(packed.a.w, packed.b.xy); + vertex.uv = packed.b.zw; + vertex.tangent = packed.tangent; + return vertex; +} + +struct Meshlet { + start_vertex_id: u32, + start_index_id: u32, + triangle_count: u32, +} + +struct MeshletBoundingSpheres { + self_culling: MeshletBoundingSphere, + self_lod: MeshletBoundingSphere, + parent_lod: MeshletBoundingSphere, +} + +struct MeshletBoundingSphere { + center: vec3, + radius: f32, +} + +struct DrawIndirectArgs { + vertex_count: atomic, + instance_count: u32, + first_vertex: u32, + first_instance: u32, +} + +#ifdef MESHLET_FILL_CLUSTER_BUFFERS_PASS +var cluster_count: u32; +@group(0) @binding(0) var meshlet_instance_meshlet_counts_prefix_sum: array; // Per entity instance +@group(0) @binding(1) var meshlet_instance_meshlet_slice_starts: array; // Per entity instance +@group(0) @binding(2) var meshlet_cluster_instance_ids: array; // Per cluster +@group(0) @binding(3) var meshlet_cluster_meshlet_ids: array; // Per cluster +#endif + +#ifdef MESHLET_CULLING_PASS +@group(0) @binding(0) var meshlet_cluster_meshlet_ids: array; // Per cluster +@group(0) @binding(1) var meshlet_bounding_spheres: array; // Per meshlet +@group(0) @binding(2) var meshlet_cluster_instance_ids: array; // Per cluster +@group(0) @binding(3) var meshlet_instance_uniforms: array; // Per entity instance +@group(0) @binding(4) var meshlet_view_instance_visibility: array; // 1 
bit per entity instance, packed as a bitmask +@group(0) @binding(5) var meshlet_second_pass_candidates: array>; // 1 bit per cluster , packed as a bitmask +@group(0) @binding(6) var meshlets: array; // Per meshlet +@group(0) @binding(7) var draw_indirect_args: DrawIndirectArgs; // Single object shared between all workgroups/meshlets/triangles +@group(0) @binding(8) var draw_triangle_buffer: array; // Single object shared between all workgroups/meshlets/triangles +@group(0) @binding(9) var depth_pyramid: texture_2d; // From the end of the last frame for the first culling pass, and from the first raster pass for the second culling pass +@group(0) @binding(10) var view: View; +@group(0) @binding(11) var previous_view: PreviousViewUniforms; + +fn should_cull_instance(instance_id: u32) -> bool { + let bit_offset = instance_id % 32u; + let packed_visibility = meshlet_view_instance_visibility[instance_id / 32u]; + return bool(extractBits(packed_visibility, bit_offset, 1u)); +} + +fn cluster_is_second_pass_candidate(cluster_id: u32) -> bool { + let packed_candidates = meshlet_second_pass_candidates[cluster_id / 32u]; + let bit_offset = cluster_id % 32u; + return bool(extractBits(packed_candidates, bit_offset, 1u)); +} +#endif + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS +@group(0) @binding(0) var meshlet_cluster_meshlet_ids: array; // Per cluster +@group(0) @binding(1) var meshlets: array; // Per meshlet +@group(0) @binding(2) var meshlet_indices: array; // Many per meshlet +@group(0) @binding(3) var meshlet_vertex_ids: array; // Many per meshlet +@group(0) @binding(4) var meshlet_vertex_data: array; // Many per meshlet +@group(0) @binding(5) var meshlet_cluster_instance_ids: array; // Per cluster +@group(0) @binding(6) var meshlet_instance_uniforms: array; // Per entity instance +@group(0) @binding(7) var meshlet_instance_material_ids: array; // Per entity instance +@group(0) @binding(8) var draw_triangle_buffer: array; // Single object shared between all workgroups/meshlets/triangles +@group(0) @binding(9) var view: View; + +fn get_meshlet_index(index_id: u32) -> u32 { + let packed_index = meshlet_indices[index_id / 4u]; + let bit_offset = (index_id % 4u) * 8u; + return extractBits(packed_index, bit_offset, 8u); +} +#endif + +#ifdef MESHLET_MESH_MATERIAL_PASS +@group(1) @binding(0) var meshlet_visibility_buffer: texture_2d; // Generated from the meshlet raster passes +@group(1) @binding(1) var meshlet_cluster_meshlet_ids: array; // Per cluster +@group(1) @binding(2) var meshlets: array; // Per meshlet +@group(1) @binding(3) var meshlet_indices: array; // Many per meshlet +@group(1) @binding(4) var meshlet_vertex_ids: array; // Many per meshlet +@group(1) @binding(5) var meshlet_vertex_data: array; // Many per meshlet +@group(1) @binding(6) var meshlet_cluster_instance_ids: array; // Per cluster +@group(1) @binding(7) var meshlet_instance_uniforms: array; // Per entity instance + +fn get_meshlet_index(index_id: u32) -> u32 { + let packed_index = meshlet_indices[index_id / 4u]; + let bit_offset = (index_id % 4u) * 8u; + return extractBits(packed_index, bit_offset, 8u); +} +#endif + +``` + +### bevy/crates/bevy_pbr/src/meshlet/copy_material_depth + +```rust +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var material_depth: texture_2d; + +/// This pass copies the R16Uint material depth texture to an actual Depth16Unorm depth texture. 
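+/// The stored R16Uint texel is a material ID in [0, 65535]; dividing by 65535.0 remaps it onto
+/// the [0.0, 1.0] range a Depth16Unorm attachment stores (e.g. material ID 3 becomes 3.0 / 65535.0),
+/// so that a later material pass can select its own pixels, typically via an equal depth test.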
+ +@fragment +fn copy_material_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { + return f32(textureLoad(material_depth, vec2(in.position.xy), 0).r) / 65535.0; +} + +``` + +### bevy/crates/bevy_pbr/src/meshlet/dummy_visibility_buffer_resolve + +```rust +#define_import_path bevy_pbr::meshlet_visibility_buffer_resolve + +/// Dummy shader to prevent naga_oil from complaining about missing imports when the MeshletPlugin is not loaded, +/// as naga_oil tries to resolve imports even if they're behind an #ifdef. + +``` + +### bevy/crates/bevy_pbr/src/meshlet/downsample_depth + +```rust +@group(0) @binding(0) var mip_0: texture_depth_2d; +@group(0) @binding(1) var mip_1: texture_storage_2d; +@group(0) @binding(2) var mip_2: texture_storage_2d; +@group(0) @binding(3) var mip_3: texture_storage_2d; +@group(0) @binding(4) var mip_4: texture_storage_2d; +@group(0) @binding(5) var mip_5: texture_storage_2d; +@group(0) @binding(6) var mip_6: texture_storage_2d; +@group(0) @binding(7) var mip_7: texture_storage_2d; +@group(0) @binding(8) var mip_8: texture_storage_2d; +@group(0) @binding(9) var mip_9: texture_storage_2d; +@group(0) @binding(10) var mip_10: texture_storage_2d; +@group(0) @binding(11) var mip_11: texture_storage_2d; +@group(0) @binding(12) var mip_12: texture_storage_2d; +@group(0) @binding(13) var samplr: sampler; +var max_mip_level: u32; + +/// Generates a hierarchical depth buffer. +/// Based on FidelityFX SPD v2.1 https://github.com/GPUOpen-LibrariesAndSDKs/FidelityFX-SDK/blob/d7531ae47d8b36a5d4025663e731a47a38be882f/sdk/include/FidelityFX/gpu/spd/ffx_spd.h#L528 + +var intermediate_memory: array, 16>; + +@compute +@workgroup_size(256, 1, 1) +fn downsample_depth_first( + @builtin(num_workgroups) num_workgroups: vec3u, + @builtin(workgroup_id) workgroup_id: vec3u, + @builtin(local_invocation_index) local_invocation_index: u32, +) { + let sub_xy = remap_for_wave_reduction(local_invocation_index % 64u); + let x = sub_xy.x + 8u * ((local_invocation_index >> 6u) % 2u); + let y = sub_xy.y + 8u * (local_invocation_index >> 7u); + + downsample_mips_0_and_1(x, y, workgroup_id.xy, local_invocation_index); + + downsample_mips_2_to_5(x, y, workgroup_id.xy, local_invocation_index); +} + +@compute +@workgroup_size(256, 1, 1) +fn downsample_depth_second(@builtin(local_invocation_index) local_invocation_index: u32) { + let sub_xy = remap_for_wave_reduction(local_invocation_index % 64u); + let x = sub_xy.x + 8u * ((local_invocation_index >> 6u) % 2u); + let y = sub_xy.y + 8u * (local_invocation_index >> 7u); + + downsample_mips_6_and_7(x, y); + + downsample_mips_8_to_11(x, y, local_invocation_index); +} + +fn downsample_mips_0_and_1(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + var v: vec4f; + + var tex = vec2(workgroup_id * 64u) + vec2(x * 2u, y * 2u); + var pix = vec2(workgroup_id * 32u) + vec2(x, y); + v[0] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[0])); + + tex = vec2(workgroup_id * 64u) + vec2(x * 2u + 32u, y * 2u); + pix = vec2(workgroup_id * 32u) + vec2(x + 16u, y); + v[1] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[1])); + + tex = vec2(workgroup_id * 64u) + vec2(x * 2u, y * 2u + 32u); + pix = vec2(workgroup_id * 32u) + vec2(x, y + 16u); + v[2] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[2])); + + tex = vec2(workgroup_id * 64u) + vec2(x * 2u + 32u, y * 2u + 32u); + pix = vec2(workgroup_id * 32u) + vec2(x + 16u, y + 16u); + v[3] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[3])); + + if 
max_mip_level <= 1u { return; } + + for (var i = 0u; i < 4u; i++) { + intermediate_memory[x][y] = v[i]; + workgroupBarrier(); + if local_invocation_index < 64u { + v[i] = reduce_4(vec4( + intermediate_memory[x * 2u + 0u][y * 2u + 0u], + intermediate_memory[x * 2u + 1u][y * 2u + 0u], + intermediate_memory[x * 2u + 0u][y * 2u + 1u], + intermediate_memory[x * 2u + 1u][y * 2u + 1u], + )); + pix = (workgroup_id * 16u) + vec2( + x + (i % 2u) * 8u, + y + (i / 2u) * 8u, + ); + textureStore(mip_2, pix, vec4(v[i])); + } + workgroupBarrier(); + } + + if local_invocation_index < 64u { + intermediate_memory[x + 0u][y + 0u] = v[0]; + intermediate_memory[x + 8u][y + 0u] = v[1]; + intermediate_memory[x + 0u][y + 8u] = v[2]; + intermediate_memory[x + 8u][y + 8u] = v[3]; + } +} + +fn downsample_mips_2_to_5(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if max_mip_level <= 2u { return; } + workgroupBarrier(); + downsample_mip_2(x, y, workgroup_id, local_invocation_index); + + if max_mip_level <= 3u { return; } + workgroupBarrier(); + downsample_mip_3(x, y, workgroup_id, local_invocation_index); + + if max_mip_level <= 4u { return; } + workgroupBarrier(); + downsample_mip_4(x, y, workgroup_id, local_invocation_index); + + if max_mip_level <= 5u { return; } + workgroupBarrier(); + downsample_mip_5(workgroup_id, local_invocation_index); +} + +fn downsample_mip_2(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 64u { + let v = reduce_4(vec4( + intermediate_memory[x * 2u + 0u][y * 2u + 0u], + intermediate_memory[x * 2u + 1u][y * 2u + 0u], + intermediate_memory[x * 2u + 0u][y * 2u + 1u], + intermediate_memory[x * 2u + 1u][y * 2u + 1u], + )); + textureStore(mip_3, (workgroup_id * 8u) + vec2(x, y), vec4(v)); + intermediate_memory[x * 2u + y % 2u][y * 2u] = v; + } +} + +fn downsample_mip_3(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 16u { + let v = reduce_4(vec4( + intermediate_memory[x * 4u + 0u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 2u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 0u + 1u][y * 4u + 2u], + intermediate_memory[x * 4u + 2u + 1u][y * 4u + 2u], + )); + textureStore(mip_4, (workgroup_id * 4u) + vec2(x, y), vec4(v)); + intermediate_memory[x * 4u + y][y * 4u] = v; + } +} + +fn downsample_mip_4(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 4u { + let v = reduce_4(vec4( + intermediate_memory[x * 8u + 0u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 4u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 0u + 1u + y * 2u][y * 8u + 4u], + intermediate_memory[x * 8u + 4u + 1u + y * 2u][y * 8u + 4u], + )); + textureStore(mip_5, (workgroup_id * 2u) + vec2(x, y), vec4(v)); + intermediate_memory[x + y * 2u][0u] = v; + } +} + +fn downsample_mip_5(workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 1u { + let v = reduce_4(vec4( + intermediate_memory[0u][0u], + intermediate_memory[1u][0u], + intermediate_memory[2u][0u], + intermediate_memory[3u][0u], + )); + textureStore(mip_6, workgroup_id, vec4(v)); + } +} + +fn downsample_mips_6_and_7(x: u32, y: u32) { + var v: vec4f; + + var tex = vec2(x * 4u + 0u, y * 4u + 0u); + var pix = vec2(x * 2u + 0u, y * 2u + 0u); + v[0] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[0])); + + tex = vec2(x * 4u + 2u, y * 4u + 0u); + pix = vec2(x * 2u + 1u, y * 2u + 0u); + v[1] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, 
vec4(v[1])); + + tex = vec2(x * 4u + 0u, y * 4u + 2u); + pix = vec2(x * 2u + 0u, y * 2u + 1u); + v[2] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[2])); + + tex = vec2(x * 4u + 2u, y * 4u + 2u); + pix = vec2(x * 2u + 1u, y * 2u + 1u); + v[3] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[3])); + + if max_mip_level <= 7u { return; } + + let vr = reduce_4(v); + textureStore(mip_8, vec2(x, y), vec4(vr)); + intermediate_memory[x][y] = vr; +} + +fn downsample_mips_8_to_11(x: u32, y: u32, local_invocation_index: u32) { + if max_mip_level <= 8u { return; } + workgroupBarrier(); + downsample_mip_8(x, y, local_invocation_index); + + if max_mip_level <= 9u { return; } + workgroupBarrier(); + downsample_mip_9(x, y, local_invocation_index); + + if max_mip_level <= 10u { return; } + workgroupBarrier(); + downsample_mip_10(x, y, local_invocation_index); + + if max_mip_level <= 11u { return; } + workgroupBarrier(); + downsample_mip_11(local_invocation_index); +} + +fn downsample_mip_8(x: u32, y: u32, local_invocation_index: u32) { + if local_invocation_index < 64u { + let v = reduce_4(vec4( + intermediate_memory[x * 2u + 0u][y * 2u + 0u], + intermediate_memory[x * 2u + 1u][y * 2u + 0u], + intermediate_memory[x * 2u + 0u][y * 2u + 1u], + intermediate_memory[x * 2u + 1u][y * 2u + 1u], + )); + textureStore(mip_9, vec2(x, y), vec4(v)); + intermediate_memory[x * 2u + y % 2u][y * 2u] = v; + } +} + +fn downsample_mip_9(x: u32, y: u32, local_invocation_index: u32) { + if local_invocation_index < 16u { + let v = reduce_4(vec4( + intermediate_memory[x * 4u + 0u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 2u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 0u + 1u][y * 4u + 2u], + intermediate_memory[x * 4u + 2u + 1u][y * 4u + 2u], + )); + textureStore(mip_10, vec2(x, y), vec4(v)); + intermediate_memory[x * 4u + y][y * 4u] = v; + } +} + +fn downsample_mip_10(x: u32, y: u32, local_invocation_index: u32) { + if local_invocation_index < 4u { + let v = reduce_4(vec4( + intermediate_memory[x * 8u + 0u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 4u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 0u + 1u + y * 2u][y * 8u + 4u], + intermediate_memory[x * 8u + 4u + 1u + y * 2u][y * 8u + 4u], + )); + textureStore(mip_11, vec2(x, y), vec4(v)); + intermediate_memory[x + y * 2u][0u] = v; + } +} + +fn downsample_mip_11(local_invocation_index: u32) { + if local_invocation_index < 1u { + let v = reduce_4(vec4( + intermediate_memory[0u][0u], + intermediate_memory[1u][0u], + intermediate_memory[2u][0u], + intermediate_memory[3u][0u], + )); + textureStore(mip_12, vec2(0u, 0u), vec4(v)); + } +} + +fn remap_for_wave_reduction(a: u32) -> vec2u { + return vec2( + insertBits(extractBits(a, 2u, 3u), a, 0u, 1u), + insertBits(extractBits(a, 3u, 3u), extractBits(a, 1u, 2u), 0u, 2u), + ); +} + +fn reduce_load_mip_0(tex: vec2u) -> f32 { + let uv = (vec2f(tex) + 0.5) / vec2f(textureDimensions(mip_0)); + return reduce_4(textureGather(mip_0, samplr, uv)); +} + +fn reduce_load_mip_6(tex: vec2u) -> f32 { + return reduce_4(vec4( + textureLoad(mip_6, tex + vec2(0u, 0u)).r, + textureLoad(mip_6, tex + vec2(0u, 1u)).r, + textureLoad(mip_6, tex + vec2(1u, 0u)).r, + textureLoad(mip_6, tex + vec2(1u, 1u)).r, + )); +} + +fn reduce_4(v: vec4f) -> f32 { + return min(min(v.x, v.y), min(v.z, v.w)); +} + +``` + +### bevy/crates/bevy_pbr/src/light_probe/environment_map + +```rust +#define_import_path bevy_pbr::environment_map + +#import bevy_pbr::light_probe::query_light_probe +#import 
bevy_pbr::mesh_view_bindings as bindings +#import bevy_pbr::mesh_view_bindings::light_probes +#import bevy_pbr::lighting::{ + F_Schlick_vec, LayerLightingInput, LightingInput, LAYER_BASE, LAYER_CLEARCOAT +} + +struct EnvironmentMapLight { + diffuse: vec3, + specular: vec3, +}; + +struct EnvironmentMapRadiances { + irradiance: vec3, + radiance: vec3, +} + +// Define two versions of this function, one for the case in which there are +// multiple light probes and one for the case in which only the view light probe +// is present. + +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY + +fn compute_radiances( + input: ptr, + layer: u32, + world_position: vec3, + found_diffuse_indirect: bool, +) -> EnvironmentMapRadiances { + // Unpack. + let perceptual_roughness = (*input).layers[layer].perceptual_roughness; + let N = (*input).layers[layer].N; + let R = (*input).layers[layer].R; + + var radiances: EnvironmentMapRadiances; + + // Search for a reflection probe that contains the fragment. + var query_result = query_light_probe(world_position, /*is_irradiance_volume=*/ false); + + // If we didn't find a reflection probe, use the view environment map if applicable. + if (query_result.texture_index < 0) { + query_result.texture_index = light_probes.view_cubemap_index; + query_result.intensity = light_probes.intensity_for_view; + } + + // If there's no cubemap, bail out. + if (query_result.texture_index < 0) { + radiances.irradiance = vec3(0.0); + radiances.radiance = vec3(0.0); + return radiances; + } + + // Split-sum approximation for image based lighting: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf + let radiance_level = perceptual_roughness * f32(textureNumLevels( + bindings::specular_environment_maps[query_result.texture_index]) - 1u); + + if (!found_diffuse_indirect) { + radiances.irradiance = textureSampleLevel( + bindings::diffuse_environment_maps[query_result.texture_index], + bindings::environment_map_sampler, + vec3(N.xy, -N.z), + 0.0).rgb * query_result.intensity; + } + + radiances.radiance = textureSampleLevel( + bindings::specular_environment_maps[query_result.texture_index], + bindings::environment_map_sampler, + vec3(R.xy, -R.z), + radiance_level).rgb * query_result.intensity; + + return radiances; +} + +#else // MULTIPLE_LIGHT_PROBES_IN_ARRAY + +fn compute_radiances( + input: ptr, + layer: u32, + world_position: vec3, + found_diffuse_indirect: bool, +) -> EnvironmentMapRadiances { + // Unpack. 
+ let perceptual_roughness = (*input).layers[layer].perceptual_roughness; + let N = (*input).layers[layer].N; + let R = (*input).layers[layer].R; + + var radiances: EnvironmentMapRadiances; + + if (light_probes.view_cubemap_index < 0) { + radiances.irradiance = vec3(0.0); + radiances.radiance = vec3(0.0); + return radiances; + } + + // Split-sum approximation for image based lighting: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf + // Technically we could use textureNumLevels(specular_environment_map) - 1 here, but we use a uniform + // because textureNumLevels() does not work on WebGL2 + let radiance_level = perceptual_roughness * f32(light_probes.smallest_specular_mip_level_for_view); + + let intensity = light_probes.intensity_for_view; + + if (!found_diffuse_indirect) { + radiances.irradiance = textureSampleLevel( + bindings::diffuse_environment_map, + bindings::environment_map_sampler, + vec3(N.xy, -N.z), + 0.0).rgb * intensity; + } + + radiances.radiance = textureSampleLevel( + bindings::specular_environment_map, + bindings::environment_map_sampler, + vec3(R.xy, -R.z), + radiance_level).rgb * intensity; + + return radiances; +} + +#endif // MULTIPLE_LIGHT_PROBES_IN_ARRAY + +#ifdef STANDARD_MATERIAL_CLEARCOAT + +// Adds the environment map light from the clearcoat layer to that of the base +// layer. +fn environment_map_light_clearcoat( + out: ptr, + input: ptr, + found_diffuse_indirect: bool, +) { + // Unpack. + let world_position = (*input).P; + let clearcoat_NdotV = (*input).layers[LAYER_CLEARCOAT].NdotV; + let clearcoat_strength = (*input).clearcoat_strength; + + // Calculate the Fresnel term `Fc` for the clearcoat layer. + // 0.04 is a hardcoded value for F0 from the Filament spec. + let clearcoat_F0 = vec3(0.04); + let Fc = F_Schlick_vec(clearcoat_F0, 1.0, clearcoat_NdotV) * clearcoat_strength; + let inv_Fc = 1.0 - Fc; + + let clearcoat_radiances = compute_radiances( + input, LAYER_CLEARCOAT, world_position, found_diffuse_indirect); + + // Composite the clearcoat layer on top of the existing one. + // These formulas are from Filament: + // + (*out).diffuse *= inv_Fc; + (*out).specular = (*out).specular * inv_Fc * inv_Fc + clearcoat_radiances.radiance * Fc; +} + +#endif // STANDARD_MATERIAL_CLEARCOAT + +fn environment_map_light( + input: ptr, + found_diffuse_indirect: bool, +) -> EnvironmentMapLight { + // Unpack. + let roughness = (*input).layers[LAYER_BASE].roughness; + let diffuse_color = (*input).diffuse_color; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let F_ab = (*input).F_ab; + let F0 = (*input).F0_; + let world_position = (*input).P; + + var out: EnvironmentMapLight; + + let radiances = compute_radiances(input, LAYER_BASE, world_position, found_diffuse_indirect); + if (all(radiances.irradiance == vec3(0.0)) && all(radiances.radiance == vec3(0.0))) { + out.diffuse = vec3(0.0); + out.specular = vec3(0.0); + return out; + } + + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. 
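+ // Concretely, saturate(dot(F0, vec3(50.0 * 0.33))) is saturate(49.5 * mean(F0)): it stays at 1.0
+ // (no extinction) for any plausible dielectric or metallic F0 and only falls toward 0.0 as the
+ // mean F0 drops below roughly 0.02.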
+ // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(F0, vec3(50.0 * 0.33))); + + // Multiscattering approximation: https://www.jcgt.org/published/0008/01/03/paper.pdf + // Useful reference: https://bruop.github.io/ibl + let Fr = max(vec3(1.0 - roughness), F0) - F0; + let kS = F0 + Fr * pow(1.0 - NdotV, 5.0); + let Ess = F_ab.x + F_ab.y; + let FssEss = kS * Ess * specular_occlusion; + let Ems = 1.0 - Ess; + let Favg = F0 + (1.0 - F0) / 21.0; + let Fms = FssEss * Favg / (1.0 - Ems * Favg); + let FmsEms = Fms * Ems; + let Edss = 1.0 - (FssEss + FmsEms); + let kD = diffuse_color * Edss; + + if (!found_diffuse_indirect) { + out.diffuse = (FmsEms + kD) * radiances.irradiance; + } else { + out.diffuse = vec3(0.0); + } + + out.specular = FssEss * radiances.radiance; + +#ifdef STANDARD_MATERIAL_CLEARCOAT + environment_map_light_clearcoat(&out, input, found_diffuse_indirect); +#endif // STANDARD_MATERIAL_CLEARCOAT + + return out; +} + +``` + +### bevy/crates/bevy_pbr/src/light_probe/irradiance_volume + +```rust +#define_import_path bevy_pbr::irradiance_volume + +#import bevy_pbr::light_probe::query_light_probe +#import bevy_pbr::mesh_view_bindings::{ + irradiance_volumes, + irradiance_volume, + irradiance_volume_sampler, + light_probes, +}; + +#ifdef IRRADIANCE_VOLUMES_ARE_USABLE + +// See: +// https://advances.realtimerendering.com/s2006/Mitchell-ShadingInValvesSourceEngine.pdf +// Slide 28, "Ambient Cube Basis" +fn irradiance_volume_light(world_position: vec3, N: vec3) -> vec3 { + // Search for an irradiance volume that contains the fragment. + let query_result = query_light_probe(world_position, /*is_irradiance_volume=*/ true); + + // If there was no irradiance volume found, bail out. + if (query_result.texture_index < 0) { + return vec3(0.0f); + } + +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY + let irradiance_volume_texture = irradiance_volumes[query_result.texture_index]; +#else + let irradiance_volume_texture = irradiance_volume; +#endif + + let atlas_resolution = vec3(textureDimensions(irradiance_volume_texture)); + let resolution = vec3(textureDimensions(irradiance_volume_texture) / vec3(1u, 2u, 3u)); + + // Make sure to clamp to the edges to avoid texture bleed. + var unit_pos = (query_result.light_from_world * vec4(world_position, 1.0f)).xyz; + let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f)); + let uvw = stp / atlas_resolution; + + // The bottom half of each cube slice is the negative part, so choose it if applicable on each + // slice. + let neg_offset = select(vec3(0.0f), vec3(0.5f), N < vec3(0.0f)); + + let uvw_x = uvw + vec3(0.0f, neg_offset.x, 0.0f); + let uvw_y = uvw + vec3(0.0f, neg_offset.y, 1.0f / 3.0f); + let uvw_z = uvw + vec3(0.0f, neg_offset.z, 2.0f / 3.0f); + + let rgb_x = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_x, 0.0).rgb; + let rgb_y = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_y, 0.0).rgb; + let rgb_z = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_z, 0.0).rgb; + + // Use Valve's formula to sample. 
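+ // Each axis samples the face chosen above and is weighted by the squared normal component:
+ // ambient(N) = rgb_x * N.x^2 + rgb_y * N.y^2 + rgb_z * N.z^2. Since |N| = 1, the weights sum
+ // to one, so the result is a convex blend of the three face colors.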
+ let NN = N * N; + return (rgb_x * NN.x + rgb_y * NN.y + rgb_z * NN.z) * query_result.intensity; +} + +#endif // IRRADIANCE_VOLUMES_ARE_USABLE + +``` + +### bevy/crates/bevy_pbr/src/light_probe/light_probe + +```rust +#define_import_path bevy_pbr::light_probe + +#import bevy_pbr::mesh_view_bindings::light_probes +#import bevy_pbr::mesh_view_types::LightProbe + +// The result of searching for a light probe. +struct LightProbeQueryResult { + // The index of the light probe texture or textures in the binding array or + // arrays. + texture_index: i32, + // A scale factor that's applied to the diffuse and specular light from the + // light probe. This is in units of cd/m² (candela per square meter). + intensity: f32, + // Transform from world space to the light probe model space. In light probe + // model space, the light probe is a 1×1×1 cube centered on the origin. + light_from_world: mat4x4, +}; + +fn transpose_affine_matrix(matrix: mat3x4) -> mat4x4 { + let matrix4x4 = mat4x4( + matrix[0], + matrix[1], + matrix[2], + vec4(0.0, 0.0, 0.0, 1.0)); + return transpose(matrix4x4); +} + +// Searches for a light probe that contains the fragment. +// +// TODO: Interpolate between multiple light probes. +fn query_light_probe( + world_position: vec3, + is_irradiance_volume: bool, +) -> LightProbeQueryResult { + var result: LightProbeQueryResult; + result.texture_index = -1; + + var light_probe_count: i32; + if is_irradiance_volume { + light_probe_count = light_probes.irradiance_volume_count; + } else { + light_probe_count = light_probes.reflection_probe_count; + } + + for (var light_probe_index: i32 = 0; + light_probe_index < light_probe_count && result.texture_index < 0; + light_probe_index += 1) { + var light_probe: LightProbe; + if is_irradiance_volume { + light_probe = light_probes.irradiance_volumes[light_probe_index]; + } else { + light_probe = light_probes.reflection_probes[light_probe_index]; + } + + // Unpack the inverse transform. + let light_from_world = + transpose_affine_matrix(light_probe.light_from_world_transposed); + + // Check to see if the transformed point is inside the unit cube + // centered at the origin. + let probe_space_pos = (light_from_world * vec4(world_position, 1.0f)).xyz; + if (all(abs(probe_space_pos) <= vec3(0.5f))) { + result.texture_index = light_probe.cubemap_index; + result.intensity = light_probe.intensity; + result.light_from_world = light_from_world; + + // TODO: Workaround for ICE in DXC https://github.com/microsoft/DirectXShaderCompiler/issues/6183 + // We can't use `break` here because of the ICE. + // So instead we rely on the fact that we set `result.texture_index` + // above and check its value in the `for` loop header before + // looping. + // break; + } + } + + return result; +} + + +``` + +### bevy/crates/bevy_pbr/src/lightmap/lightmap + +```rust +#define_import_path bevy_pbr::lightmap + +#import bevy_pbr::mesh_bindings::mesh + +@group(1) @binding(4) var lightmaps_texture: texture_2d; +@group(1) @binding(5) var lightmaps_sampler: sampler; + +// Samples the lightmap, if any, and returns indirect illumination from it. 
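+// The mesh uniform packs the lightmap's UV rectangle into two u32s as four 16-bit fixed-point
+// corner coordinates; the function unpacks them into a [0, 1] min/max rect and remaps the mesh's
+// lightmap UVs into that rect before sampling.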
+fn lightmap(uv: vec2, exposure: f32, instance_index: u32) -> vec3 { + let packed_uv_rect = mesh[instance_index].lightmap_uv_rect; + let uv_rect = vec4(vec4( + packed_uv_rect.x & 0xffffu, + packed_uv_rect.x >> 16u, + packed_uv_rect.y & 0xffffu, + packed_uv_rect.y >> 16u)) / 65535.0; + + let lightmap_uv = mix(uv_rect.xy, uv_rect.zw, uv); + + // Mipmapping lightmaps is usually a bad idea due to leaking across UV + // islands, so there's no harm in using mip level 0 and it lets us avoid + // control flow uniformity problems. + // + // TODO(pcwalton): Consider bicubic filtering. + return textureSampleLevel( + lightmaps_texture, + lightmaps_sampler, + lightmap_uv, + 0.0).rgb * exposure; +} + +``` + +### bevy/crates/bevy_pbr/src/volumetric_fog/volumetric_fog + +```rust +// A postprocessing shader that implements volumetric fog via raymarching and +// sampling directional light shadow maps. +// +// The overall approach is a combination of the volumetric rendering in [1] and +// the shadow map raymarching in [2]. First, we sample the depth buffer to +// determine how long our ray is. Then we do a raymarch, with physically-based +// calculations at each step to determine how much light was absorbed, scattered +// out, and scattered in. To determine in-scattering, we sample the shadow map +// for the light to determine whether the point was in shadow or not. +// +// [1]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/intro-volume-rendering.html +// +// [2]: http://www.alexandre-pestana.com/volumetric-lights/ + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_pbr::mesh_view_bindings::{lights, view} +#import bevy_pbr::mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_VOLUMETRIC_BIT +#import bevy_pbr::shadow_sampling::sample_shadow_map_hardware +#import bevy_pbr::shadows::{get_cascade_index, world_to_directional_light_local} +#import bevy_pbr::view_transformations::{ + frag_coord_to_ndc, + position_ndc_to_view, + position_ndc_to_world +} + +// The GPU version of [`VolumetricFogSettings`]. See the comments in +// `volumetric_fog/mod.rs` for descriptions of the fields here. +struct VolumetricFog { + fog_color: vec3, + light_tint: vec3, + ambient_color: vec3, + ambient_intensity: f32, + step_count: u32, + max_depth: f32, + absorption: f32, + scattering: f32, + density: f32, + scattering_asymmetry: f32, + light_intensity: f32, +} + +@group(1) @binding(0) var volumetric_fog: VolumetricFog; +@group(1) @binding(1) var color_texture: texture_2d; +@group(1) @binding(2) var color_sampler: sampler; + +#ifdef MULTISAMPLED +@group(1) @binding(3) var depth_texture: texture_depth_multisampled_2d; +#else +@group(1) @binding(3) var depth_texture: texture_depth_2d; +#endif + +// 1 / (4π) +const FRAC_4_PI: f32 = 0.07957747154594767; + +// The common Henyey-Greenstein asymmetric phase function [1] [2]. +// +// This determines how much light goes toward the viewer as opposed to away from +// the viewer. From a visual point of view, it controls how the light shafts +// appear and disappear as the camera looks at the light source. 
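+// In formula form: p(cos θ) = (1 / 4π) * (1 - g^2) / (1 + g^2 - 2g * cos θ)^(3/2), where g is
+// `scattering_asymmetry` and cos θ is the dot product of the light direction and the marched ray
+// direction (the `neg_LdotV` argument below).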
+// +// [1]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/ray-marching-get-it-right.html +// +// [2]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions#TheHenyeyndashGreensteinPhaseFunction +fn henyey_greenstein(neg_LdotV: f32) -> f32 { + let g = volumetric_fog.scattering_asymmetry; + let denom = 1.0 + g * g - 2.0 * g * neg_LdotV; + return FRAC_4_PI * (1.0 - g * g) / (denom * sqrt(denom)); +} + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Unpack the `volumetric_fog` settings. + let fog_color = volumetric_fog.fog_color; + let ambient_color = volumetric_fog.ambient_color; + let ambient_intensity = volumetric_fog.ambient_intensity; + let step_count = volumetric_fog.step_count; + let max_depth = volumetric_fog.max_depth; + let absorption = volumetric_fog.absorption; + let scattering = volumetric_fog.scattering; + let density = volumetric_fog.density; + let light_tint = volumetric_fog.light_tint; + let light_intensity = volumetric_fog.light_intensity; + + let exposure = view.exposure; + + // Sample the depth. If this is multisample, just use sample 0; this is + // approximate but good enough. + let frag_coord = in.position; + let depth = textureLoad(depth_texture, vec2(frag_coord.xy), 0); + + // Starting at the end depth, which we got above, figure out how long the + // ray we want to trace is and the length of each increment. + let end_depth = min( + max_depth, + -position_ndc_to_view(frag_coord_to_ndc(vec4(in.position.xy, depth, 1.0))).z + ); + let step_size = end_depth / f32(step_count); + + let directional_light_count = lights.n_directional_lights; + + // Calculate the ray origin (`Ro`) and the ray direction (`Rd`) in NDC, + // view, and world coordinates. + let Rd_ndc = vec3(frag_coord_to_ndc(in.position).xy, 1.0); + let Rd_view = normalize(position_ndc_to_view(Rd_ndc)); + let Ro_world = view.world_position; + let Rd_world = normalize(position_ndc_to_world(Rd_ndc) - Ro_world); + + // Use Beer's law [1] [2] to calculate the maximum amount of light that each + // directional light could contribute, and modulate that value by the light + // tint and fog color. (The actual value will in turn be modulated by the + // phase according to the Henyey-Greenstein formula.) + // + // We use a bit of a hack here. Conceptually, directional lights are + // infinitely far away. But, if we modeled exactly that, then directional + // lights would never contribute any light to the fog, because an + // infinitely-far directional light combined with an infinite amount of fog + // would result in complete absorption of the light. So instead we pretend + // that the directional light is `max_depth` units away and do the + // calculation in those terms. Because the fake distance to the directional + // light is a constant, this lets us perform the calculation once up here + // instead of marching secondary rays toward the light during the + // raymarching step, which improves performance dramatically. + // + // [1]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/intro-volume-rendering.html + // + // [2]: https://en.wikipedia.org/wiki/Beer%E2%80%93Lambert_law + let light_attenuation = exp(-density * max_depth * (absorption + scattering)); + let light_factors_per_step = fog_color * light_tint * light_attenuation * scattering * + density * step_size * light_intensity * exposure; + + // Use Beer's law again to accumulate the ambient light all along the path. 
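+ // Beer–Lambert: light travelling a distance d through a medium with extinction coefficient
+ // sigma_t = absorption + scattering is attenuated by T(d) = exp(-sigma_t * d). Here d is
+ // `end_depth`, and the surviving fraction scales the ambient term.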
+ var accumulated_color = exp(-end_depth * (absorption + scattering)) * ambient_color * + ambient_intensity; + + // Pre-calculate absorption (amount of light absorbed by the fog) and + // out-scattering (amount of light the fog scattered away). This is the same + // amount for every step. + let sample_attenuation = exp(-step_size * density * (absorption + scattering)); + + // This is the amount of the background that shows through. We're actually + // going to recompute this over and over again for each directional light, + // coming up with the same values each time. + var background_alpha = 1.0; + + for (var light_index = 0u; light_index < directional_light_count; light_index += 1u) { + // Volumetric lights are all sorted first, so the first time we come to + // a non-volumetric light, we know we've seen them all. + let light = &lights.directional_lights[light_index]; + if (((*light).flags & DIRECTIONAL_LIGHT_FLAGS_VOLUMETRIC_BIT) == 0) { + break; + } + + // Offset the depth value by the bias. + let depth_offset = (*light).shadow_depth_bias * (*light).direction_to_light.xyz; + + // Compute phase, which determines the fraction of light that's + // scattered toward the camera instead of away from it. + let neg_LdotV = dot(normalize((*light).direction_to_light.xyz), Rd_world); + let phase = henyey_greenstein(neg_LdotV); + + // Modulate the factor we calculated above by the phase, fog color, + // light color, light tint. + let light_color_per_step = (*light).color.rgb * phase * light_factors_per_step; + + // Reset `background_alpha` for a new raymarch. + background_alpha = 1.0; + + // Start raymarching. + for (var step = 0u; step < step_count; step += 1u) { + // As an optimization, break if we've gotten too dark. + if (background_alpha < 0.001) { + break; + } + + // Calculate where we are in the ray. + let P_world = Ro_world + Rd_world * f32(step) * step_size; + let P_view = Rd_view * f32(step) * step_size; + + // Process absorption and out-scattering. + background_alpha *= sample_attenuation; + + // Compute in-scattering (amount of light other fog particles + // scattered into this ray). This is where any directional light is + // scattered in. + + // Prepare to sample the shadow map. + let cascade_index = get_cascade_index(light_index, P_view.z); + let light_local = world_to_directional_light_local( + light_index, + cascade_index, + vec4(P_world + depth_offset, 1.0) + ); + + // If we're outside the shadow map entirely, local light attenuation + // is zero. + var local_light_attenuation = f32(light_local.w != 0.0); + + // Otherwise, sample the shadow map to determine whether, and by how + // much, this sample is in the light. + if (local_light_attenuation != 0.0) { + let cascade = &(*light).cascades[cascade_index]; + let array_index = i32((*light).depth_texture_base_index + cascade_index); + local_light_attenuation = + sample_shadow_map_hardware(light_local.xy, light_local.z, array_index); + } + + if (local_light_attenuation != 0.0) { + // Accumulate the light. + accumulated_color += light_color_per_step * local_light_attenuation * + background_alpha; + } + } + } + + // We're done! Blend between the source color and the lit fog color. 
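+ // `background_alpha` is the transmittance left after the raymarch, so the blend below is
+ // final = scene color * transmittance + accumulated in-scattered fog light.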
+ let source = textureSample(color_texture, color_sampler, in.uv); + return vec4(source.rgb * background_alpha + accumulated_color, source.a); +} + +``` + +### bevy/crates/bevy_pbr/src/ssr/raymarch + +```rust +// Copyright (c) 2023 Tomasz Stachowiak +// +// This contribution is dual licensed under EITHER OF +// +// Apache License, Version 2.0, (http://www.apache.org/licenses/LICENSE-2.0) +// MIT license (http://opensource.org/licenses/MIT) +// +// at your option. +// +// This is a port of the original [`raymarch.hlsl`] to WGSL. It's deliberately +// kept as close as possible so that patches to the original `raymarch.hlsl` +// have the greatest chances of applying to this version. +// +// [`raymarch.hlsl`]: +// https://gist.github.com/h3r2tic/9c8356bdaefbe80b1a22ae0aaee192db + +#define_import_path bevy_pbr::raymarch + +#import bevy_pbr::mesh_view_bindings::depth_prepass_texture +#import bevy_pbr::view_transformations::{ + direction_world_to_clip, + ndc_to_uv, + perspective_camera_near, + position_world_to_ndc, +} + +// Allows us to sample from the depth buffer with bilinear filtering. +@group(1) @binding(2) var depth_linear_sampler: sampler; + +// Allows us to sample from the depth buffer with nearest-neighbor filtering. +@group(1) @binding(3) var depth_nearest_sampler: sampler; + +// Main code + +struct HybridRootFinder { + linear_steps: u32, + bisection_steps: u32, + use_secant: bool, + linear_march_exponent: f32, + + jitter: f32, + min_t: f32, + max_t: f32, +} + +fn hybrid_root_finder_new_with_linear_steps(v: u32) -> HybridRootFinder { + var res: HybridRootFinder; + res.linear_steps = v; + res.bisection_steps = 0u; + res.use_secant = false; + res.linear_march_exponent = 1.0; + res.jitter = 1.0; + res.min_t = 0.0; + res.max_t = 1.0; + return res; +} + +fn hybrid_root_finder_find_root( + root_finder: ptr, + start: vec3, + end: vec3, + distance_fn: ptr, + hit_t: ptr, + miss_t: ptr, + hit_d: ptr, +) -> bool { + let dir = end - start; + + var min_t = (*root_finder).min_t; + var max_t = (*root_finder).max_t; + + var min_d = DistanceWithPenetration(0.0, false, 0.0); + var max_d = DistanceWithPenetration(0.0, false, 0.0); + + let step_size = (max_t - min_t) / f32((*root_finder).linear_steps); + + var intersected = false; + + // + // Ray march using linear steps + + if ((*root_finder).linear_steps > 0u) { + let candidate_t = mix( + min_t, + max_t, + pow( + (*root_finder).jitter / f32((*root_finder).linear_steps), + (*root_finder).linear_march_exponent + ) + ); + + let candidate = start + dir * candidate_t; + let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate); + intersected = candidate_d.distance < 0.0 && candidate_d.valid; + + if (intersected) { + max_t = candidate_t; + max_d = candidate_d; + // The `[min_t .. max_t]` interval contains an intersection. End the linear search. + } else { + // No intersection yet. Carry on. + min_t = candidate_t; + min_d = candidate_d; + + for (var step = 1u; step < (*root_finder).linear_steps; step += 1u) { + let candidate_t = mix( + (*root_finder).min_t, + (*root_finder).max_t, + pow( + (f32(step) + (*root_finder).jitter) / f32((*root_finder).linear_steps), + (*root_finder).linear_march_exponent + ) + ); + + let candidate = start + dir * candidate_t; + let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate); + intersected = candidate_d.distance < 0.0 && candidate_d.valid; + + if (intersected) { + max_t = candidate_t; + max_d = candidate_d; + // The `[min_t .. max_t]` interval contains an intersection. 
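+ // (`min_t` now holds the last known miss and `max_t` the first known hit; the
+ // bisection/secant refinement below narrows that bracket.)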
+ // End the linear search. + break; + } else { + // No intersection yet. Carry on. + min_t = candidate_t; + min_d = candidate_d; + } + } + } + } + + *miss_t = min_t; + *hit_t = min_t; + + // + // Refine the hit using bisection + + if (intersected) { + for (var step = 0u; step < (*root_finder).bisection_steps; step += 1u) { + let mid_t = (min_t + max_t) * 0.5; + let candidate = start + dir * mid_t; + let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate); + + if (candidate_d.distance < 0.0 && candidate_d.valid) { + // Intersection at the mid point. Refine the first half. + max_t = mid_t; + max_d = candidate_d; + } else { + // No intersection yet at the mid point. Refine the second half. + min_t = mid_t; + min_d = candidate_d; + } + } + + if ((*root_finder).use_secant) { + // Finish with one application of the secant method + let total_d = min_d.distance + -max_d.distance; + + let mid_t = mix(min_t, max_t, min_d.distance / total_d); + let candidate = start + dir * mid_t; + let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate); + + // Only accept the result of the secant method if it improves upon + // the previous result. + // + // Technically root_finder should be `abs(candidate_d.distance) < + // min(min_d.distance, -max_d.distance) * frac`, but root_finder seems + // sufficient. + if (abs(candidate_d.distance) < min_d.distance * 0.9 && candidate_d.valid) { + *hit_t = mid_t; + *hit_d = candidate_d; + } else { + *hit_t = max_t; + *hit_d = max_d; + } + + return true; + } else { + *hit_t = max_t; + *hit_d = max_d; + return true; + } + } else { + // Mark the conservative miss distance. + *hit_t = min_t; + return false; + } +} + +struct DistanceWithPenetration { + /// Distance to the surface of which a root we're trying to find + distance: f32, + + /// Whether to consider this sample valid for intersection. + /// Mostly relevant for allowing the ray marcher to travel behind surfaces, + /// as it will mark surfaces it travels under as invalid. + valid: bool, + + /// Conservative estimate of depth to which the ray penetrates the marched surface. + penetration: f32, +} + +struct DepthRaymarchDistanceFn { + depth_tex_size: vec2, + + march_behind_surfaces: bool, + depth_thickness: f32, + + use_sloppy_march: bool, +} + +fn depth_raymarch_distance_fn_evaluate( + distance_fn: ptr, + ray_point_cs: vec3, +) -> DistanceWithPenetration { + let interp_uv = ndc_to_uv(ray_point_cs.xy); + + let ray_depth = 1.0 / ray_point_cs.z; + + // We're using both point-sampled and bilinear-filtered values from the depth buffer. + // + // That's really stupid but works like magic. For samples taken near the ray origin, + // the discrete nature of the depth buffer becomes a problem. It's not a land of continuous surfaces, + // but a bunch of stacked duplo bricks. + // + // Technically we should be taking discrete steps in distance_fn duplo land, but then we're at the mercy + // of arbitrary quantization of our directions -- and sometimes we'll take a step which would + // claim that the ray is occluded -- even though the underlying smooth surface wouldn't occlude it. + // + // If we instead take linear taps from the depth buffer, we reconstruct the linear surface. + // That fixes acne, but introduces false shadowing near object boundaries, as we now pretend + // that everything is shrink-wrapped by distance_fn continuous 2.5D surface, and our depth thickness + // heuristic ends up falling apart. 
+ // + // The fix is to consider both the smooth and the discrete surfaces, and only claim occlusion + // when the ray descends below both. + // + // The two approaches end up fixing each other's artifacts: + // * The false occlusions due to duplo land are rejected because the ray stays above the smooth surface. + // * The shrink-wrap surface is no longer continuous, so it's possible for rays to miss it. + + let linear_depth = + 1.0 / textureSampleLevel(depth_prepass_texture, depth_linear_sampler, interp_uv, 0.0); + let unfiltered_depth = + 1.0 / textureSampleLevel(depth_prepass_texture, depth_nearest_sampler, interp_uv, 0.0); + + var max_depth: f32; + var min_depth: f32; + + if ((*distance_fn).use_sloppy_march) { + max_depth = unfiltered_depth; + min_depth = unfiltered_depth; + } else { + max_depth = max(linear_depth, unfiltered_depth); + min_depth = min(linear_depth, unfiltered_depth); + } + + let bias = 0.000002; + + var res: DistanceWithPenetration; + res.distance = max_depth * (1.0 + bias) - ray_depth; + + // distance_fn will be used at the end of the ray march to potentially discard the hit. + res.penetration = ray_depth - min_depth; + + if ((*distance_fn).march_behind_surfaces) { + res.valid = res.penetration < (*distance_fn).depth_thickness; + } else { + res.valid = true; + } + + return res; +} + +struct DepthRayMarchResult { + /// True if the raymarch hit something. + hit: bool, + + /// In case of a hit, the normalized distance to it. + /// + /// In case of a miss, the furthest the ray managed to travel, which could either be + /// exceeding the max range, or getting behind a surface further than the depth thickness. + /// + /// Range: `0..=1` as a lerp factor over `ray_start_cs..=ray_end_cs`. + hit_t: f32, + + /// UV correspindong to `hit_t`. + hit_uv: vec2, + + /// The distance that the hit point penetrates into the hit surface. + /// Will normally be non-zero due to limited precision of the ray march. + /// + /// In case of a miss: undefined. + hit_penetration: f32, + + /// Ditto, within the range `0..DepthRayMarch::depth_thickness_linear_z` + /// + /// In case of a miss: undefined. + hit_penetration_frac: f32, +} + +struct DepthRayMarch { + /// Number of steps to be taken at regular intervals to find an initial intersection. + /// Must not be zero. + linear_steps: u32, + + /// Exponent to be applied in the linear part of the march. + /// + /// A value of 1.0 will result in equidistant steps, and higher values will compress + /// the earlier steps, and expand the later ones. This might be desirable in order + /// to get more detail close to objects in SSR or SSGI. + /// + /// For optimal performance, this should be a small compile-time unsigned integer, + /// such as 1 or 2. + linear_march_exponent: f32, + + /// Number of steps in a bisection (binary search) to perform once the linear search + /// has found an intersection. Helps narrow down the hit, increasing the chance of + /// the secant method finding an accurate hit point. + /// + /// Useful when sampling color, e.g. SSR or SSGI, but pointless for contact shadows. + bisection_steps: u32, + + /// Approximate the root position using the secant method -- by solving for line-line + /// intersection between the ray approach rate and the surface gradient. + /// + /// Useful when sampling color, e.g. SSR or SSGI, but pointless for contact shadows. + use_secant: bool, + + /// Jitter to apply to the first step of the linear search; 0..=1 range, mapping + /// to the extent of a single linear step in the first phase of the search. 
+ /// Use 1.0 if you don't want jitter. + jitter: f32, + + /// Clip space coordinates (w=1) of the ray. + ray_start_cs: vec3, + ray_end_cs: vec3, + + /// Should be used for contact shadows, but not for any color bounce, e.g. SSR. + /// + /// For SSR etc. this can easily create leaks, but with contact shadows it allows the rays + /// to pass over invalid occlusions (due to thickness), and find potentially valid ones ahead. + /// + /// Note that this will cause the linear search to potentially miss surfaces, + /// because when the ray overshoots and ends up penetrating a surface further than + /// `depth_thickness_linear_z`, the ray marcher will just carry on. + /// + /// For this reason, this may require a lot of samples, or high depth thickness, + /// so that `depth_thickness_linear_z >= world space ray length / linear_steps`. + march_behind_surfaces: bool, + + /// If `true`, the ray marcher only performs nearest lookups of the depth buffer, + /// resulting in aliasing and false occlusion when marching tiny detail. + /// It should work fine for longer traces with fewer rays though. + use_sloppy_march: bool, + + /// When marching the depth buffer, we only have 2.5D information, and don't know how + /// thick surfaces are. We shall assume that the depth buffer fragments are little squares + /// with a constant thickness defined by this parameter. + depth_thickness_linear_z: f32, + + /// Size of the depth buffer we're marching in, in pixels. + depth_tex_size: vec2, +} + +fn depth_ray_march_new_from_depth(depth_tex_size: vec2) -> DepthRayMarch { + var res: DepthRayMarch; + res.jitter = 1.0; + res.linear_steps = 4u; + res.bisection_steps = 0u; + res.linear_march_exponent = 1.0; + res.depth_tex_size = depth_tex_size; + res.depth_thickness_linear_z = 1.0; + res.march_behind_surfaces = false; + res.use_sloppy_march = false; + return res; +} + +fn depth_ray_march_to_cs_dir_impl( + raymarch: ptr, + dir_cs: vec4, + infinite: bool, +) { + var end_cs = vec4((*raymarch).ray_start_cs, 1.0) + dir_cs; + + // Perform perspective division, but avoid dividing by zero for rays + // heading directly towards the eye. + end_cs /= select(-1.0, 1.0, end_cs.w >= 0.0) * max(1e-10, abs(end_cs.w)); + + // Clip ray start to the view frustum + var delta_cs = end_cs.xyz - (*raymarch).ray_start_cs; + let near_edge = select(vec3(-1.0, -1.0, 0.0), vec3(1.0, 1.0, 1.0), delta_cs < vec3(0.0)); + let dist_to_near_edge = (near_edge - (*raymarch).ray_start_cs) / delta_cs; + let max_dist_to_near_edge = max(dist_to_near_edge.x, dist_to_near_edge.y); + (*raymarch).ray_start_cs += delta_cs * max(0.0, max_dist_to_near_edge); + + // Clip ray end to the view frustum + + delta_cs = end_cs.xyz - (*raymarch).ray_start_cs; + let far_edge = select(vec3(-1.0, -1.0, 0.0), vec3(1.0, 1.0, 1.0), delta_cs >= vec3(0.0)); + let dist_to_far_edge = (far_edge - (*raymarch).ray_start_cs) / delta_cs; + let min_dist_to_far_edge = min( + min(dist_to_far_edge.x, dist_to_far_edge.y), + dist_to_far_edge.z + ); + + if (infinite) { + delta_cs *= min_dist_to_far_edge; + } else { + // If unbounded, would make the ray reach the end of the frustum + delta_cs *= min(1.0, min_dist_to_far_edge); + } + + (*raymarch).ray_end_cs = (*raymarch).ray_start_cs + delta_cs; +} + +/// March from a clip-space position (w = 1) +fn depth_ray_march_from_cs(raymarch: ptr, v: vec3) { + (*raymarch).ray_start_cs = v; +} + +/// March to a clip-space position (w = 1) +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. 
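+/// (This is the finite-ray counterpart of `depth_ray_march_to_cs_dir`; both forward to
+/// `depth_ray_march_to_cs_dir_impl`, which here clamps the ray to the given end point rather than
+/// extending it to the edge of the frustum.)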
+fn depth_ray_march_to_cs(raymarch: ptr, end_cs: vec3) { + let dir = vec4(end_cs - (*raymarch).ray_start_cs, 0.0) * sign(end_cs.z); + depth_ray_march_to_cs_dir_impl(raymarch, dir, false); +} + +/// March towards a clip-space direction. Infinite (ray is extended to cover the whole view frustum). +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. +fn depth_ray_march_to_cs_dir(raymarch: ptr, dir: vec4) { + depth_ray_march_to_cs_dir_impl(raymarch, dir, true); +} + +/// March to a world-space position. +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. +fn depth_ray_march_to_ws(raymarch: ptr, end: vec3) { + depth_ray_march_to_cs(raymarch, position_world_to_ndc(end)); +} + +/// March towards a world-space direction. Infinite (ray is extended to cover the whole view frustum). +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. +fn depth_ray_march_to_ws_dir(raymarch: ptr, dir: vec3) { + depth_ray_march_to_cs_dir_impl(raymarch, direction_world_to_clip(dir), true); +} + +/// Perform the ray march. +fn depth_ray_march_march(raymarch: ptr) -> DepthRayMarchResult { + var res = DepthRayMarchResult(false, 0.0, vec2(0.0), 0.0, 0.0); + + let ray_start_uv = ndc_to_uv((*raymarch).ray_start_cs.xy); + let ray_end_uv = ndc_to_uv((*raymarch).ray_end_cs.xy); + + let ray_uv_delta = ray_end_uv - ray_start_uv; + let ray_len_px = ray_uv_delta * (*raymarch).depth_tex_size; + + let min_px_per_step = 1u; + let step_count = max( + 2, + min(i32((*raymarch).linear_steps), i32(floor(length(ray_len_px) / f32(min_px_per_step)))) + ); + + let linear_z_to_scaled_linear_z = 1.0 / perspective_camera_near(); + let depth_thickness = (*raymarch).depth_thickness_linear_z * linear_z_to_scaled_linear_z; + + var distance_fn: DepthRaymarchDistanceFn; + distance_fn.depth_tex_size = (*raymarch).depth_tex_size; + distance_fn.march_behind_surfaces = (*raymarch).march_behind_surfaces; + distance_fn.depth_thickness = depth_thickness; + distance_fn.use_sloppy_march = (*raymarch).use_sloppy_march; + + var hit: DistanceWithPenetration; + + var hit_t = 0.0; + var miss_t = 0.0; + var root_finder = hybrid_root_finder_new_with_linear_steps(u32(step_count)); + root_finder.bisection_steps = (*raymarch).bisection_steps; + root_finder.use_secant = (*raymarch).use_secant; + root_finder.linear_march_exponent = (*raymarch).linear_march_exponent; + root_finder.jitter = (*raymarch).jitter; + let intersected = hybrid_root_finder_find_root( + &root_finder, + (*raymarch).ray_start_cs, + (*raymarch).ray_end_cs, + &distance_fn, + &hit_t, + &miss_t, + &hit + ); + + res.hit_t = hit_t; + + if (intersected && hit.penetration < depth_thickness && hit.distance < depth_thickness) { + res.hit = true; + res.hit_uv = mix(ray_start_uv, ray_end_uv, res.hit_t); + res.hit_penetration = hit.penetration / linear_z_to_scaled_linear_z; + res.hit_penetration_frac = hit.penetration / depth_thickness; + return res; + } + + res.hit_t = miss_t; + res.hit_uv = mix(ray_start_uv, ray_end_uv, res.hit_t); + + return res; +} + +``` + +### bevy/crates/bevy_pbr/src/ssr/ssr + +```rust +// A postprocessing pass that performs screen-space reflections. 
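+//
+// In outline: each fragment is reconstructed from the deferred G-buffer, the view vector is
+// reflected about the shading normal, and the reflected ray is marched against the depth buffer
+// using the `bevy_pbr::raymarch` helpers. Fragments rougher than the configured threshold are
+// skipped, and the environment map (when present) fills in reflections for rays that miss.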
+ +#define_import_path bevy_pbr::ssr + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_pbr::{ + lighting, + lighting::{LAYER_BASE, LAYER_CLEARCOAT}, + mesh_view_bindings::{view, depth_prepass_texture, deferred_prepass_texture, ssr_settings}, + pbr_deferred_functions::pbr_input_from_deferred_gbuffer, + pbr_deferred_types, + pbr_functions, + prepass_utils, + raymarch::{ + depth_ray_march_from_cs, + depth_ray_march_march, + depth_ray_march_new_from_depth, + depth_ray_march_to_ws_dir, + }, + utils, + view_transformations::{ + depth_ndc_to_view_z, + frag_coord_to_ndc, + ndc_to_frag_coord, + ndc_to_uv, + position_view_to_ndc, + position_world_to_ndc, + position_world_to_view, + }, +} +#import bevy_render::view::View + +#ifdef ENVIRONMENT_MAP +#import bevy_pbr::environment_map +#endif + +// The texture representing the color framebuffer. +@group(1) @binding(0) var color_texture: texture_2d; + +// The sampler that lets us sample from the color framebuffer. +@group(1) @binding(1) var color_sampler: sampler; + +// Group 1, bindings 2 and 3 are in `raymarch.wgsl`. + +// Returns the reflected color in the RGB channel and the specular occlusion in +// the alpha channel. +// +// The general approach here is similar to [1]. We first project the reflection +// ray into screen space. Then we perform uniform steps along that screen-space +// reflected ray, converting each step to view space. +// +// The arguments are: +// +// * `R_world`: The reflection vector in world space. +// +// * `P_world`: The current position in world space. +// +// [1]: https://lettier.github.io/3d-game-shaders-for-beginners/screen-space-reflection.html +fn evaluate_ssr(R_world: vec3, P_world: vec3) -> vec4 { + let depth_size = vec2(textureDimensions(depth_prepass_texture)); + + var raymarch = depth_ray_march_new_from_depth(depth_size); + depth_ray_march_from_cs(&raymarch, position_world_to_ndc(P_world)); + depth_ray_march_to_ws_dir(&raymarch, normalize(R_world)); + raymarch.linear_steps = ssr_settings.linear_steps; + raymarch.bisection_steps = ssr_settings.bisection_steps; + raymarch.use_secant = ssr_settings.use_secant != 0u; + raymarch.depth_thickness_linear_z = ssr_settings.thickness; + raymarch.jitter = 1.0; // Disable jitter for now. + raymarch.march_behind_surfaces = false; + + let raymarch_result = depth_ray_march_march(&raymarch); + if (raymarch_result.hit) { + return vec4( + textureSampleLevel(color_texture, color_sampler, raymarch_result.hit_uv, 0.0).rgb, + 0.0 + ); + } + + return vec4(0.0, 0.0, 0.0, 1.0); +} + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Sample the depth. + var frag_coord = in.position; + frag_coord.z = prepass_utils::prepass_depth(in.position, 0u); + + // Load the G-buffer data. + let fragment = textureLoad(color_texture, vec2(frag_coord.xy), 0); + let gbuffer = textureLoad(deferred_prepass_texture, vec2(frag_coord.xy), 0); + let pbr_input = pbr_input_from_deferred_gbuffer(frag_coord, gbuffer); + + // Don't do anything if the surface is too rough, since we can't blur or do + // temporal accumulation yet. + let perceptual_roughness = pbr_input.material.perceptual_roughness; + if (perceptual_roughness > ssr_settings.perceptual_roughness_threshold) { + return fragment; + } + + // Unpack the PBR input. + var specular_occlusion = pbr_input.specular_occlusion; + let world_position = pbr_input.world_position.xyz; + let N = pbr_input.N; + let V = pbr_input.V; + + // Calculate the reflection vector. 
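+ // reflect(-V, N) = -V - 2.0 * dot(N, -V) * N, i.e. the direction from the camera to the
+ // fragment mirrored about the shading normal.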
+ let R = reflect(-V, N); + + // Do the raymarching. + let ssr_specular = evaluate_ssr(R, world_position); + var indirect_light = ssr_specular.rgb; + specular_occlusion *= ssr_specular.a; + + // Sample the environment map if necessary. + // + // This will take the specular part of the environment map into account if + // the ray missed. Otherwise, it only takes the diffuse part. + // + // TODO: Merge this with the duplicated code in `apply_pbr_lighting`. +#ifdef ENVIRONMENT_MAP + // Unpack values required for environment mapping. + let base_color = pbr_input.material.base_color.rgb; + let metallic = pbr_input.material.metallic; + let reflectance = pbr_input.material.reflectance; + let specular_transmission = pbr_input.material.specular_transmission; + let diffuse_transmission = pbr_input.material.diffuse_transmission; + let diffuse_occlusion = pbr_input.diffuse_occlusion; + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Do the above calculations again for the clearcoat layer. Remember that + // the clearcoat can have its own roughness and its own normal. + let clearcoat = pbr_input.material.clearcoat; + let clearcoat_perceptual_roughness = pbr_input.material.clearcoat_perceptual_roughness; + let clearcoat_roughness = lighting::perceptualRoughnessToRoughness(clearcoat_perceptual_roughness); + let clearcoat_N = pbr_input.clearcoat_N; + let clearcoat_NdotV = max(dot(clearcoat_N, pbr_input.V), 0.0001); + let clearcoat_R = reflect(-pbr_input.V, clearcoat_N); +#endif // STANDARD_MATERIAL_CLEARCOAT + + // Calculate various other values needed for environment mapping. + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); + let diffuse_color = pbr_functions::calculate_diffuse_color( + base_color, + metallic, + specular_transmission, + diffuse_transmission + ); + let NdotV = max(dot(N, V), 0.0001); + let F_ab = lighting::F_AB(perceptual_roughness, NdotV); + let F0 = pbr_functions::calculate_F0(base_color, metallic, reflectance); + + // Pack all the values into a structure. + var lighting_input: lighting::LightingInput; + lighting_input.layers[LAYER_BASE].NdotV = NdotV; + lighting_input.layers[LAYER_BASE].N = N; + lighting_input.layers[LAYER_BASE].R = R; + lighting_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness; + lighting_input.layers[LAYER_BASE].roughness = roughness; + lighting_input.P = world_position.xyz; + lighting_input.V = V; + lighting_input.diffuse_color = diffuse_color; + lighting_input.F0_ = F0; + lighting_input.F_ab = F_ab; +#ifdef STANDARD_MATERIAL_CLEARCOAT + lighting_input.layers[LAYER_CLEARCOAT].NdotV = clearcoat_NdotV; + lighting_input.layers[LAYER_CLEARCOAT].N = clearcoat_N; + lighting_input.layers[LAYER_CLEARCOAT].R = clearcoat_R; + lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = clearcoat_perceptual_roughness; + lighting_input.layers[LAYER_CLEARCOAT].roughness = clearcoat_roughness; + lighting_input.clearcoat_strength = clearcoat; +#endif // STANDARD_MATERIAL_CLEARCOAT + + // Sample the environment map. + let environment_light = environment_map::environment_map_light(&lighting_input, false); + + // Accumulate the environment map light. + indirect_light += view.exposure * + (environment_light.diffuse * diffuse_occlusion + + environment_light.specular * specular_occlusion); +#endif + + // Write the results. 
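+ //
+ // The reflected lighting (plus the environment-map term, if enabled) is
+ // added on top of the color already in the framebuffer, and alpha is
+ // forced to 1.0.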
+ return vec4(fragment.rgb + indirect_light, 1.0); +} + +``` + +### bevy/crates/bevy_pbr/src/prepass/prepass_utils + +```rust +#define_import_path bevy_pbr::prepass_utils + +#import bevy_pbr::mesh_view_bindings as view_bindings + +#ifdef DEPTH_PREPASS +fn prepass_depth(frag_coord: vec4, sample_index: u32) -> f32 { +#ifdef MULTISAMPLED + return textureLoad(view_bindings::depth_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); +#else // MULTISAMPLED + return textureLoad(view_bindings::depth_prepass_texture, vec2(frag_coord.xy), 0); +#endif // MULTISAMPLED +} +#endif // DEPTH_PREPASS + +#ifdef NORMAL_PREPASS +fn prepass_normal(frag_coord: vec4, sample_index: u32) -> vec3 { +#ifdef MULTISAMPLED + let normal_sample = textureLoad(view_bindings::normal_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); +#else + let normal_sample = textureLoad(view_bindings::normal_prepass_texture, vec2(frag_coord.xy), 0); +#endif // MULTISAMPLED + return normalize(normal_sample.xyz * 2.0 - vec3(1.0)); +} +#endif // NORMAL_PREPASS + +#ifdef MOTION_VECTOR_PREPASS +fn prepass_motion_vector(frag_coord: vec4, sample_index: u32) -> vec2 { +#ifdef MULTISAMPLED + let motion_vector_sample = textureLoad(view_bindings::motion_vector_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); +#else + let motion_vector_sample = textureLoad(view_bindings::motion_vector_prepass_texture, vec2(frag_coord.xy), 0); +#endif + return motion_vector_sample.rg; +} +#endif // MOTION_VECTOR_PREPASS + +``` + +### bevy/crates/bevy_pbr/src/prepass/prepass_bindings + +```rust +#define_import_path bevy_pbr::prepass_bindings + +struct PreviousViewUniforms { + view_from_world: mat4x4, + clip_from_world: mat4x4, +} + +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(2) var previous_view_uniforms: PreviousViewUniforms; +#endif // MOTION_VECTOR_PREPASS + +// Material bindings will be in @group(2) + +``` + +### bevy/crates/bevy_pbr/src/prepass/prepass_io + +```rust +#define_import_path bevy_pbr::prepass_io + +// Most of these attributes are not used in the default prepass fragment shader, but they are still needed so we can +// pass them to custom prepass shaders like pbr_prepass.wgsl. 
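+//
+// Note that every attribute keeps a fixed `@location`; the `#ifdef` blocks
+// only control whether an attribute exists, not which slot it binds to.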
+struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + +#ifdef VERTEX_UVS_A + @location(1) uv: vec2, +#endif + +#ifdef VERTEX_UVS_B + @location(2) uv_b: vec2, +#endif + +#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS + @location(3) normal: vec3, +#ifdef VERTEX_TANGENTS + @location(4) tangent: vec4, +#endif +#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS + +#ifdef SKINNED + @location(5) joint_indices: vec4, + @location(6) joint_weights: vec4, +#endif + +#ifdef VERTEX_COLORS + @location(7) color: vec4, +#endif + +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif // MORPH_TARGETS +} + +struct VertexOutput { + // This is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, + +#ifdef VERTEX_UVS_A + @location(0) uv: vec2, +#endif + +#ifdef VERTEX_UVS_B + @location(1) uv_b: vec2, +#endif + +#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS + @location(2) world_normal: vec3, +#ifdef VERTEX_TANGENTS + @location(3) world_tangent: vec4, +#endif +#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS + + @location(4) world_position: vec4, +#ifdef MOTION_VECTOR_PREPASS + @location(5) previous_world_position: vec4, +#endif + +#ifdef DEPTH_CLAMP_ORTHO + @location(6) clip_position_unclamped: vec4, +#endif // DEPTH_CLAMP_ORTHO +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + @location(7) instance_index: u32, +#endif + +#ifdef VERTEX_COLORS + @location(8) color: vec4, +#endif +} + +#ifdef PREPASS_FRAGMENT +struct FragmentOutput { +#ifdef NORMAL_PREPASS + @location(0) normal: vec4, +#endif + +#ifdef MOTION_VECTOR_PREPASS + @location(1) motion_vector: vec2, +#endif + +#ifdef DEFERRED_PREPASS + @location(2) deferred: vec4, + @location(3) deferred_lighting_pass_id: u32, +#endif + +#ifdef DEPTH_CLAMP_ORTHO + @builtin(frag_depth) frag_depth: f32, +#endif // DEPTH_CLAMP_ORTHO +} +#endif //PREPASS_FRAGMENT + +``` + +### bevy/crates/bevy_pbr/src/prepass/prepass + +```rust +#import bevy_pbr::{ + prepass_bindings, + mesh_functions, + prepass_io::{Vertex, VertexOutput, FragmentOutput}, + skinning, + morph, + mesh_view_bindings::view, + view_transformations::position_world_to_clip, +} + +#ifdef DEFERRED_PREPASS +#import bevy_pbr::rgb9e5 +#endif + +#ifdef MORPH_TARGETS +fn morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * morph::morph(vertex.index, morph::position_offset, i); +#ifdef VERTEX_NORMALS + vertex.normal += weight * morph::morph(vertex.index, morph::normal_offset, i); +#endif +#ifdef VERTEX_TANGENTS + vertex.tangent += vec4(weight * morph::morph(vertex.index, morph::tangent_offset, i), 0.0); +#endif + } + return vertex; +} + +// Returns the morphed position of the given vertex from the previous frame. +// +// This function is used for motion vector calculation, and, as such, it doesn't +// bother morphing the normals and tangents. +fn morph_prev_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = morph::prev_weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * morph::morph(vertex.index, morph::position_offset, i); + // Don't bother morphing normals and tangents; we don't need them for + // motion vector calculation. 
+ } + return vertex; +} +#endif // MORPH_TARGETS + +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { + var out: VertexOutput; + +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); +#else + var vertex = vertex_no_morph; +#endif + +#ifdef SKINNED + var world_from_local = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); +#else // SKINNED + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + var world_from_local = mesh_functions::get_world_from_local(vertex_no_morph.instance_index); +#endif // SKINNED + + out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + out.position = position_world_to_clip(out.world_position.xyz); +#ifdef DEPTH_CLAMP_ORTHO + out.clip_position_unclamped = out.position; + out.position.z = min(out.position.z, 1.0); +#endif // DEPTH_CLAMP_ORTHO + +#ifdef VERTEX_UVS_A + out.uv = vertex.uv; +#endif // VERTEX_UVS_A + +#ifdef VERTEX_UVS_B + out.uv_b = vertex.uv_b; +#endif // VERTEX_UVS_B + +#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS +#ifdef SKINNED + out.world_normal = skinning::skin_normals(world_from_local, vertex.normal); +#else // SKINNED + out.world_normal = mesh_functions::mesh_normal_local_to_world( + vertex.normal, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif // SKINNED + +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_functions::mesh_tangent_local_to_world( + world_from_local, + vertex.tangent, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif // VERTEX_TANGENTS +#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS + +#ifdef VERTEX_COLORS + out.color = vertex.color; +#endif + + // Compute the motion vector for TAA among other purposes. For this we need + // to know where the vertex was last frame. +#ifdef MOTION_VECTOR_PREPASS + + // Take morph targets into account. +#ifdef MORPH_TARGETS + +#ifdef HAS_PREVIOUS_MORPH + let prev_vertex = morph_prev_vertex(vertex_no_morph); +#else // HAS_PREVIOUS_MORPH + let prev_vertex = vertex_no_morph; +#endif // HAS_PREVIOUS_MORPH + +#else // MORPH_TARGETS + let prev_vertex = vertex_no_morph; +#endif // MORPH_TARGETS + + // Take skinning into account. +#ifdef SKINNED + +#ifdef HAS_PREVIOUS_SKIN + let prev_model = skinning::skin_prev_model( + prev_vertex.joint_indices, + prev_vertex.joint_weights, + ); +#else // HAS_PREVIOUS_SKIN + let prev_model = mesh_functions::get_previous_world_from_local(prev_vertex.instance_index); +#endif // HAS_PREVIOUS_SKIN + +#else // SKINNED + let prev_model = mesh_functions::get_previous_world_from_local(prev_vertex.instance_index); +#endif // SKINNED + + out.previous_world_position = mesh_functions::mesh_position_local_to_world( + prev_model, + vec4(prev_vertex.position, 1.0) + ); +#endif // MOTION_VECTOR_PREPASS + +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
+ // See https://github.com/gfx-rs/naga/issues/2416 + out.instance_index = vertex_no_morph.instance_index; +#endif + + return out; +} + +#ifdef PREPASS_FRAGMENT +@fragment +fn fragment(in: VertexOutput) -> FragmentOutput { + var out: FragmentOutput; + +#ifdef NORMAL_PREPASS + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); +#endif + +#ifdef DEPTH_CLAMP_ORTHO + out.frag_depth = in.clip_position_unclamped.z; +#endif // DEPTH_CLAMP_ORTHO + +#ifdef MOTION_VECTOR_PREPASS + let clip_position_t = view.unjittered_clip_from_world * in.world_position; + let clip_position = clip_position_t.xy / clip_position_t.w; + let previous_clip_position_t = prepass_bindings::previous_view_uniforms.clip_from_world * in.previous_world_position; + let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; + // These motion vectors are used as offsets to UV positions and are stored + // in the range -1,1 to allow offsetting from the one corner to the + // diagonally-opposite corner in UV coordinates, in either direction. + // A difference between diagonally-opposite corners of clip space is in the + // range -2,2, so this needs to be scaled by 0.5. And the V direction goes + // down where clip space y goes up, so y needs to be flipped. + out.motion_vector = (clip_position - previous_clip_position) * vec2(0.5, -0.5); +#endif // MOTION_VECTOR_PREPASS + +#ifdef DEFERRED_PREPASS + // There isn't any material info available for this default prepass shader so we are just writing  + // emissive magenta out to the deferred gbuffer to be rendered by the first deferred lighting pass layer. + // This is here so if the default prepass fragment is used for deferred magenta will be rendered, and also + // as an example to show that a user could write to the deferred gbuffer if they were to start from this shader. 
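+ //
+ // The magenta emissive value is packed into the G-buffer with the
+ // shared-exponent RGB9E5 encoding from `bevy_pbr::rgb9e5`.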
+ out.deferred = vec4(0u, bevy_pbr::rgb9e5::vec3_to_rgb9e5_(vec3(1.0, 0.0, 1.0)), 0u, 0u); + out.deferred_lighting_pass_id = 1u; +#endif + + return out; +} +#endif // PREPASS_FRAGMENT + +``` + +### bevy/crates/bevy_pbr/src/ssao/gtao + +```rust +// Ground Truth-based Ambient Occlusion (GTAO) +// Paper: https://www.activision.com/cdn/research/Practical_Real_Time_Strategies_for_Accurate_Indirect_Occlusion_NEW%20VERSION_COLOR.pdf +// Presentation: https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf + +// Source code heavily based on XeGTAO v1.30 from Intel +// https://github.com/GameTechDev/XeGTAO/blob/0d177ce06bfa642f64d8af4de1197ad1bcb862d4/Source/Rendering/Shaders/XeGTAO.hlsli + +#import bevy_pbr::gtao_utils::fast_acos + +#import bevy_render::{ + view::View, + globals::Globals, + maths::{PI, HALF_PI}, +} + +@group(0) @binding(0) var preprocessed_depth: texture_2d; +@group(0) @binding(1) var normals: texture_2d; +@group(0) @binding(2) var hilbert_index_lut: texture_2d; +@group(0) @binding(3) var ambient_occlusion: texture_storage_2d; +@group(0) @binding(4) var depth_differences: texture_storage_2d; +@group(0) @binding(5) var globals: Globals; +@group(1) @binding(0) var point_clamp_sampler: sampler; +@group(1) @binding(1) var view: View; + +fn load_noise(pixel_coordinates: vec2) -> vec2 { + var index = textureLoad(hilbert_index_lut, pixel_coordinates % 64, 0).r; + +#ifdef TEMPORAL_JITTER + index += 288u * (globals.frame_count % 64u); +#endif + + // R2 sequence - http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences + return fract(0.5 + f32(index) * vec2(0.75487766624669276005, 0.5698402909980532659114)); +} + +// Calculate differences in depth between neighbor pixels (later used by the spatial denoiser pass to preserve object edges) +fn calculate_neighboring_depth_differences(pixel_coordinates: vec2) -> f32 { + // Sample the pixel's depth and 4 depths around it + let uv = vec2(pixel_coordinates) / view.viewport.zw; + let depths_upper_left = textureGather(0, preprocessed_depth, point_clamp_sampler, uv); + let depths_bottom_right = textureGather(0, preprocessed_depth, point_clamp_sampler, uv, vec2(1i, 1i)); + let depth_center = depths_upper_left.y; + let depth_left = depths_upper_left.x; + let depth_top = depths_upper_left.z; + let depth_bottom = depths_bottom_right.x; + let depth_right = depths_bottom_right.z; + + // Calculate the depth differences (large differences represent object edges) + var edge_info = vec4(depth_left, depth_right, depth_top, depth_bottom) - depth_center; + let slope_left_right = (edge_info.y - edge_info.x) * 0.5; + let slope_top_bottom = (edge_info.w - edge_info.z) * 0.5; + let edge_info_slope_adjusted = edge_info + vec4(slope_left_right, -slope_left_right, slope_top_bottom, -slope_top_bottom); + edge_info = min(abs(edge_info), abs(edge_info_slope_adjusted)); + let bias = 0.25; // Using the bias and then saturating nudges the values a bit + let scale = depth_center * 0.011; // Weight the edges by their distance from the camera + edge_info = saturate((1.0 + bias) - edge_info / scale); // Apply the bias and scale, and invert edge_info so that small values become large, and vice versa + + // Pack the edge info into the texture + let edge_info_packed = vec4(pack4x8unorm(edge_info), 0u, 0u, 0u); + textureStore(depth_differences, pixel_coordinates, edge_info_packed); + + return depth_center; +} + +fn load_normal_view_space(uv: vec2) -> vec3 { + var world_normal = textureSampleLevel(normals, 
point_clamp_sampler, uv, 0.0).xyz; + world_normal = (world_normal * 2.0) - 1.0; + let view_from_world = mat3x3( + view.view_from_world[0].xyz, + view.view_from_world[1].xyz, + view.view_from_world[2].xyz, + ); + return view_from_world * world_normal; +} + +fn reconstruct_view_space_position(depth: f32, uv: vec2) -> vec3 { + let clip_xy = vec2(uv.x * 2.0 - 1.0, 1.0 - 2.0 * uv.y); + let t = view.view_from_clip * vec4(clip_xy, depth, 1.0); + let view_xyz = t.xyz / t.w; + return view_xyz; +} + +fn load_and_reconstruct_view_space_position(uv: vec2, sample_mip_level: f32) -> vec3 { + let depth = textureSampleLevel(preprocessed_depth, point_clamp_sampler, uv, sample_mip_level).r; + return reconstruct_view_space_position(depth, uv); +} + +@compute +@workgroup_size(8, 8, 1) +fn gtao(@builtin(global_invocation_id) global_id: vec3) { + let slice_count = f32(#SLICE_COUNT); + let samples_per_slice_side = f32(#SAMPLES_PER_SLICE_SIDE); + let effect_radius = 0.5 * 1.457; + let falloff_range = 0.615 * effect_radius; + let falloff_from = effect_radius * (1.0 - 0.615); + let falloff_mul = -1.0 / falloff_range; + let falloff_add = falloff_from / falloff_range + 1.0; + + let pixel_coordinates = vec2(global_id.xy); + let uv = (vec2(pixel_coordinates) + 0.5) / view.viewport.zw; + + var pixel_depth = calculate_neighboring_depth_differences(pixel_coordinates); + pixel_depth += 0.00001; // Avoid depth precision issues + + let pixel_position = reconstruct_view_space_position(pixel_depth, uv); + let pixel_normal = load_normal_view_space(uv); + let view_vec = normalize(-pixel_position); + + let noise = load_noise(pixel_coordinates); + let sample_scale = (-0.5 * effect_radius * view.clip_from_view[0][0]) / pixel_position.z; + + var visibility = 0.0; + for (var slice_t = 0.0; slice_t < slice_count; slice_t += 1.0) { + let slice = slice_t + noise.x; + let phi = (PI / slice_count) * slice; + let omega = vec2(cos(phi), sin(phi)); + + let direction = vec3(omega.xy, 0.0); + let orthographic_direction = direction - (dot(direction, view_vec) * view_vec); + let axis = cross(direction, view_vec); + let projected_normal = pixel_normal - axis * dot(pixel_normal, axis); + let projected_normal_length = length(projected_normal); + + let sign_norm = sign(dot(orthographic_direction, projected_normal)); + let cos_norm = saturate(dot(projected_normal, view_vec) / projected_normal_length); + let n = sign_norm * fast_acos(cos_norm); + + let min_cos_horizon_1 = cos(n + HALF_PI); + let min_cos_horizon_2 = cos(n - HALF_PI); + var cos_horizon_1 = min_cos_horizon_1; + var cos_horizon_2 = min_cos_horizon_2; + let sample_mul = vec2(omega.x, -omega.y) * sample_scale; + for (var sample_t = 0.0; sample_t < samples_per_slice_side; sample_t += 1.0) { + var sample_noise = (slice_t + sample_t * samples_per_slice_side) * 0.6180339887498948482; + sample_noise = fract(noise.y + sample_noise); + + var s = (sample_t + sample_noise) / samples_per_slice_side; + s *= s; // https://github.com/GameTechDev/XeGTAO#sample-distribution + let sample = s * sample_mul; + + // * view.viewport.zw gets us from [0, 1] to [0, viewport_size], which is needed for this to get the correct mip levels + let sample_mip_level = clamp(log2(length(sample * view.viewport.zw)) - 3.3, 0.0, 5.0); // https://github.com/GameTechDev/XeGTAO#memory-bandwidth-bottleneck + let sample_position_1 = load_and_reconstruct_view_space_position(uv + sample, sample_mip_level); + let sample_position_2 = load_and_reconstruct_view_space_position(uv - sample, sample_mip_level); + + let sample_difference_1 = 
sample_position_1 - pixel_position; + let sample_difference_2 = sample_position_2 - pixel_position; + let sample_distance_1 = length(sample_difference_1); + let sample_distance_2 = length(sample_difference_2); + var sample_cos_horizon_1 = dot(sample_difference_1 / sample_distance_1, view_vec); + var sample_cos_horizon_2 = dot(sample_difference_2 / sample_distance_2, view_vec); + + let weight_1 = saturate(sample_distance_1 * falloff_mul + falloff_add); + let weight_2 = saturate(sample_distance_2 * falloff_mul + falloff_add); + sample_cos_horizon_1 = mix(min_cos_horizon_1, sample_cos_horizon_1, weight_1); + sample_cos_horizon_2 = mix(min_cos_horizon_2, sample_cos_horizon_2, weight_2); + + cos_horizon_1 = max(cos_horizon_1, sample_cos_horizon_1); + cos_horizon_2 = max(cos_horizon_2, sample_cos_horizon_2); + } + + let horizon_1 = fast_acos(cos_horizon_1); + let horizon_2 = -fast_acos(cos_horizon_2); + let v1 = (cos_norm + 2.0 * horizon_1 * sin(n) - cos(2.0 * horizon_1 - n)) / 4.0; + let v2 = (cos_norm + 2.0 * horizon_2 * sin(n) - cos(2.0 * horizon_2 - n)) / 4.0; + visibility += projected_normal_length * (v1 + v2); + } + visibility /= slice_count; + visibility = clamp(visibility, 0.03, 1.0); + + textureStore(ambient_occlusion, pixel_coordinates, vec4(visibility, 0.0, 0.0, 0.0)); +} + +``` + +### bevy/crates/bevy_pbr/src/ssao/gtao_utils + +```rust +#define_import_path bevy_pbr::gtao_utils + +#import bevy_render::maths::{PI, HALF_PI} + +// Approximates single-bounce ambient occlusion to multi-bounce ambient occlusion +// https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf#page=78 +fn gtao_multibounce(visibility: f32, base_color: vec3) -> vec3 { + let a = 2.0404 * base_color - 0.3324; + let b = -4.7951 * base_color + 0.6417; + let c = 2.7552 * base_color + 0.6903; + let x = vec3(visibility); + return max(x, ((x * a + b) * x + c) * x); +} + +fn fast_sqrt(x: f32) -> f32 { + return bitcast(0x1fbd1df5 + (bitcast(x) >> 1u)); +} + +fn fast_acos(in_x: f32) -> f32 { + let x = abs(in_x); + var res = -0.156583 * x + HALF_PI; + res *= fast_sqrt(1.0 - x); + return select(PI - res, res, in_x >= 0.0); +} + +``` + +### bevy/crates/bevy_pbr/src/ssao/spatial_denoise + +```rust +// 3x3 bilaterial filter (edge-preserving blur) +// https://people.csail.mit.edu/sparis/bf_course/course_notes.pdf + +// Note: Does not use the Gaussian kernel part of a typical bilateral blur +// From the paper: "use the information gathered on a neighborhood of 4 × 4 using a bilateral filter for +// reconstruction, using _uniform_ convolution weights" + +// Note: The paper does a 4x4 (not quite centered) filter, offset by +/- 1 pixel every other frame +// XeGTAO does a 3x3 filter, on two pixels at a time per compute thread, applied twice +// We do a 3x3 filter, on 1 pixel per compute thread, applied once + +#import bevy_render::view::View + +@group(0) @binding(0) var ambient_occlusion_noisy: texture_2d; +@group(0) @binding(1) var depth_differences: texture_2d; +@group(0) @binding(2) var ambient_occlusion: texture_storage_2d; +@group(1) @binding(0) var point_clamp_sampler: sampler; +@group(1) @binding(1) var view: View; + +@compute +@workgroup_size(8, 8, 1) +fn spatial_denoise(@builtin(global_invocation_id) global_id: vec3) { + let pixel_coordinates = vec2(global_id.xy); + let uv = vec2(pixel_coordinates) / view.viewport.zw; + + let edges0 = textureGather(0, depth_differences, point_clamp_sampler, uv); + let edges1 = textureGather(0, depth_differences, point_clamp_sampler, uv, 
vec2(2i, 0i)); + let edges2 = textureGather(0, depth_differences, point_clamp_sampler, uv, vec2(1i, 2i)); + let visibility0 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv); + let visibility1 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(2i, 0i)); + let visibility2 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(0i, 2i)); + let visibility3 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(2i, 2i)); + + let left_edges = unpack4x8unorm(edges0.x); + let right_edges = unpack4x8unorm(edges1.x); + let top_edges = unpack4x8unorm(edges0.z); + let bottom_edges = unpack4x8unorm(edges2.w); + var center_edges = unpack4x8unorm(edges0.y); + center_edges *= vec4(left_edges.y, right_edges.x, top_edges.w, bottom_edges.z); + + let center_weight = 1.2; + let left_weight = center_edges.x; + let right_weight = center_edges.y; + let top_weight = center_edges.z; + let bottom_weight = center_edges.w; + let top_left_weight = 0.425 * (top_weight * top_edges.x + left_weight * left_edges.z); + let top_right_weight = 0.425 * (top_weight * top_edges.y + right_weight * right_edges.z); + let bottom_left_weight = 0.425 * (bottom_weight * bottom_edges.x + left_weight * left_edges.w); + let bottom_right_weight = 0.425 * (bottom_weight * bottom_edges.y + right_weight * right_edges.w); + + let center_visibility = visibility0.y; + let left_visibility = visibility0.x; + let right_visibility = visibility0.z; + let top_visibility = visibility1.x; + let bottom_visibility = visibility2.z; + let top_left_visibility = visibility0.w; + let top_right_visibility = visibility1.w; + let bottom_left_visibility = visibility2.w; + let bottom_right_visibility = visibility3.w; + + var sum = center_visibility; + sum += left_visibility * left_weight; + sum += right_visibility * right_weight; + sum += top_visibility * top_weight; + sum += bottom_visibility * bottom_weight; + sum += top_left_visibility * top_left_weight; + sum += top_right_visibility * top_right_weight; + sum += bottom_left_visibility * bottom_left_weight; + sum += bottom_right_visibility * bottom_right_weight; + + var sum_weight = center_weight; + sum_weight += left_weight; + sum_weight += right_weight; + sum_weight += top_weight; + sum_weight += bottom_weight; + sum_weight += top_left_weight; + sum_weight += top_right_weight; + sum_weight += bottom_left_weight; + sum_weight += bottom_right_weight; + + let denoised_visibility = sum / sum_weight; + + textureStore(ambient_occlusion, pixel_coordinates, vec4(denoised_visibility, 0.0, 0.0, 0.0)); +} + +``` + +### bevy/crates/bevy_pbr/src/ssao/preprocess_depth + +```rust +// Inputs a depth texture and outputs a MIP-chain of depths. +// +// Because SSAO's performance is bound by texture reads, this increases +// performance over using the full resolution depth for every sample. 
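+//
+// Each 8x8 workgroup turns a 16x16 tile of the input depth into MIP 0 and
+// folds it down to a single MIP 4 texel, sharing intermediate results through
+// the `previous_mip_depth` workgroup memory below. The reduction is not a
+// plain average: `weighted_average` weights each 2x2 footprint towards its
+// closest (minimum) depth.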
+ +// Reference: https://research.nvidia.com/sites/default/files/pubs/2012-06_Scalable-Ambient-Obscurance/McGuire12SAO.pdf, section 2.2 + +#import bevy_render::view::View + +@group(0) @binding(0) var input_depth: texture_depth_2d; +@group(0) @binding(1) var preprocessed_depth_mip0: texture_storage_2d; +@group(0) @binding(2) var preprocessed_depth_mip1: texture_storage_2d; +@group(0) @binding(3) var preprocessed_depth_mip2: texture_storage_2d; +@group(0) @binding(4) var preprocessed_depth_mip3: texture_storage_2d; +@group(0) @binding(5) var preprocessed_depth_mip4: texture_storage_2d; +@group(1) @binding(0) var point_clamp_sampler: sampler; +@group(1) @binding(1) var view: View; + + +// Using 4 depths from the previous MIP, compute a weighted average for the depth of the current MIP +fn weighted_average(depth0: f32, depth1: f32, depth2: f32, depth3: f32) -> f32 { + let depth_range_scale_factor = 0.75; + let effect_radius = depth_range_scale_factor * 0.5 * 1.457; + let falloff_range = 0.615 * effect_radius; + let falloff_from = effect_radius * (1.0 - 0.615); + let falloff_mul = -1.0 / falloff_range; + let falloff_add = falloff_from / falloff_range + 1.0; + + let min_depth = min(min(depth0, depth1), min(depth2, depth3)); + let weight0 = saturate((depth0 - min_depth) * falloff_mul + falloff_add); + let weight1 = saturate((depth1 - min_depth) * falloff_mul + falloff_add); + let weight2 = saturate((depth2 - min_depth) * falloff_mul + falloff_add); + let weight3 = saturate((depth3 - min_depth) * falloff_mul + falloff_add); + let weight_total = weight0 + weight1 + weight2 + weight3; + + return ((weight0 * depth0) + (weight1 * depth1) + (weight2 * depth2) + (weight3 * depth3)) / weight_total; +} + +// Used to share the depths from the previous MIP level between all invocations in a workgroup +var previous_mip_depth: array, 8>; + +@compute +@workgroup_size(8, 8, 1) +fn preprocess_depth(@builtin(global_invocation_id) global_id: vec3, @builtin(local_invocation_id) local_id: vec3) { + let base_coordinates = vec2(global_id.xy); + + // MIP 0 - Copy 4 texels from the input depth (per invocation, 8x8 invocations per workgroup) + let pixel_coordinates0 = base_coordinates * 2i; + let pixel_coordinates1 = pixel_coordinates0 + vec2(1i, 0i); + let pixel_coordinates2 = pixel_coordinates0 + vec2(0i, 1i); + let pixel_coordinates3 = pixel_coordinates0 + vec2(1i, 1i); + let depths_uv = vec2(pixel_coordinates0) / view.viewport.zw; + let depths = textureGather(0, input_depth, point_clamp_sampler, depths_uv, vec2(1i, 1i)); + textureStore(preprocessed_depth_mip0, pixel_coordinates0, vec4(depths.w, 0.0, 0.0, 0.0)); + textureStore(preprocessed_depth_mip0, pixel_coordinates1, vec4(depths.z, 0.0, 0.0, 0.0)); + textureStore(preprocessed_depth_mip0, pixel_coordinates2, vec4(depths.x, 0.0, 0.0, 0.0)); + textureStore(preprocessed_depth_mip0, pixel_coordinates3, vec4(depths.y, 0.0, 0.0, 0.0)); + + // MIP 1 - Weighted average of MIP 0's depth values (per invocation, 8x8 invocations per workgroup) + let depth_mip1 = weighted_average(depths.w, depths.z, depths.x, depths.y); + textureStore(preprocessed_depth_mip1, base_coordinates, vec4(depth_mip1, 0.0, 0.0, 0.0)); + previous_mip_depth[local_id.x][local_id.y] = depth_mip1; + + workgroupBarrier(); + + // MIP 2 - Weighted average of MIP 1's depth values (per invocation, 4x4 invocations per workgroup) + if all(local_id.xy % vec2(2u) == vec2(0u)) { + let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; + let depth1 = previous_mip_depth[local_id.x + 1u][local_id.y + 0u]; + 
let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 1u]; + let depth3 = previous_mip_depth[local_id.x + 1u][local_id.y + 1u]; + let depth_mip2 = weighted_average(depth0, depth1, depth2, depth3); + textureStore(preprocessed_depth_mip2, base_coordinates / 2i, vec4(depth_mip2, 0.0, 0.0, 0.0)); + previous_mip_depth[local_id.x][local_id.y] = depth_mip2; + } + + workgroupBarrier(); + + // MIP 3 - Weighted average of MIP 2's depth values (per invocation, 2x2 invocations per workgroup) + if all(local_id.xy % vec2(4u) == vec2(0u)) { + let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; + let depth1 = previous_mip_depth[local_id.x + 2u][local_id.y + 0u]; + let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 2u]; + let depth3 = previous_mip_depth[local_id.x + 2u][local_id.y + 2u]; + let depth_mip3 = weighted_average(depth0, depth1, depth2, depth3); + textureStore(preprocessed_depth_mip3, base_coordinates / 4i, vec4(depth_mip3, 0.0, 0.0, 0.0)); + previous_mip_depth[local_id.x][local_id.y] = depth_mip3; + } + + workgroupBarrier(); + + // MIP 4 - Weighted average of MIP 3's depth values (per invocation, 1 invocation per workgroup) + if all(local_id.xy % vec2(8u) == vec2(0u)) { + let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; + let depth1 = previous_mip_depth[local_id.x + 4u][local_id.y + 0u]; + let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 4u]; + let depth3 = previous_mip_depth[local_id.x + 4u][local_id.y + 4u]; + let depth_mip4 = weighted_average(depth0, depth1, depth2, depth3); + textureStore(preprocessed_depth_mip4, base_coordinates / 8i, vec4(depth_mip4, 0.0, 0.0, 0.0)); + } +} + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d_view_types + +```rust +#define_import_path bevy_sprite::mesh2d_view_types + +#import bevy_render::view +#import bevy_render::globals + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d_types + +```rust +#define_import_path bevy_sprite::mesh2d_types + +struct Mesh2d { + // Affine 4x3 matrix transposed to 3x4 + // Use bevy_render::maths::affine3_to_square to unpack + world_from_local: mat3x4, + // 3x3 matrix packed in mat2x4 and f32 as: + // [0].xyz, [1].x, + // [1].yz, [2].xy + // [2].z + // Use bevy_render::maths::mat2x4_f32_to_mat3x3_unpack to unpack + local_from_world_transpose_a: mat2x4, + local_from_world_transpose_b: f32, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
+ flags: u32, +}; + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d_bindings + +```rust +#define_import_path bevy_sprite::mesh2d_bindings + +#import bevy_sprite::mesh2d_types::Mesh2d + +#ifdef PER_OBJECT_BUFFER_BATCH_SIZE +@group(1) @binding(0) var mesh: array; +#else +@group(1) @binding(0) var mesh: array; +#endif // PER_OBJECT_BUFFER_BATCH_SIZE + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d_view_bindings + +```rust +#define_import_path bevy_sprite::mesh2d_view_bindings + +#import bevy_render::view::View +#import bevy_render::globals::Globals + +@group(0) @binding(0) var view: View; + +@group(0) @binding(1) var globals: Globals; + +@group(0) @binding(2) var dt_lut_texture: texture_3d; +@group(0) @binding(3) var dt_lut_sampler: sampler; + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d_vertex_output + +```rust +#define_import_path bevy_sprite::mesh2d_vertex_output + +struct VertexOutput { + // this is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, + @location(0) world_position: vec4, + @location(1) world_normal: vec3, + @location(2) uv: vec2, + #ifdef VERTEX_TANGENTS + @location(3) world_tangent: vec4, + #endif + #ifdef VERTEX_COLORS + @location(4) color: vec4, + #endif +} + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/wireframe2d + +```rust +#import bevy_sprite::mesh2d_vertex_output::VertexOutput + +struct WireframeMaterial { + color: vec4, +}; + +@group(2) @binding(0) var material: WireframeMaterial; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return material.color; +} + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/color_material + +```rust +#import bevy_sprite::{ + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif + +struct ColorMaterial { + color: vec4, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
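+ // Bit 0 (`COLOR_MATERIAL_FLAGS_TEXTURE_BIT` below) indicates that a texture
+ // is bound and should be multiplied into the color.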
+ flags: u32, +}; +const COLOR_MATERIAL_FLAGS_TEXTURE_BIT: u32 = 1u; + +@group(2) @binding(0) var material: ColorMaterial; +@group(2) @binding(1) var texture: texture_2d; +@group(2) @binding(2) var texture_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + var output_color: vec4 = material.color; +#ifdef VERTEX_COLORS + output_color = output_color * mesh.color; +#endif + if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { + output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); + } +#ifdef TONEMAP_IN_SHADER + output_color = tonemapping::tone_mapping(output_color, view.color_grading); +#endif + return output_color; +} + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d + +```rust +#import bevy_sprite::{ + mesh2d_functions as mesh_functions, + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif + +struct Vertex { + @builtin(instance_index) instance_index: u32, +#ifdef VERTEX_POSITIONS + @location(0) position: vec3, +#endif +#ifdef VERTEX_NORMALS + @location(1) normal: vec3, +#endif +#ifdef VERTEX_UVS + @location(2) uv: vec2, +#endif +#ifdef VERTEX_TANGENTS + @location(3) tangent: vec4, +#endif +#ifdef VERTEX_COLORS + @location(4) color: vec4, +#endif +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; +#ifdef VERTEX_UVS + out.uv = vertex.uv; +#endif + +#ifdef VERTEX_POSITIONS + var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); + out.world_position = mesh_functions::mesh2d_position_local_to_world( + world_from_local, + vec4(vertex.position, 1.0) + ); + out.position = mesh_functions::mesh2d_position_world_to_clip(out.world_position); +#endif + +#ifdef VERTEX_NORMALS + out.world_normal = mesh_functions::mesh2d_normal_local_to_world(vertex.normal, vertex.instance_index); +#endif + +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_functions::mesh2d_tangent_local_to_world( + world_from_local, + vertex.tangent + ); +#endif + +#ifdef VERTEX_COLORS + out.color = vertex.color; +#endif + return out; +} + +@fragment +fn fragment( + in: VertexOutput, +) -> @location(0) vec4 { +#ifdef VERTEX_COLORS + var color = in.color; +#ifdef TONEMAP_IN_SHADER + color = tonemapping::tone_mapping(color, view.color_grading); +#endif + return color; +#else + return vec4(1.0, 0.0, 1.0, 1.0); +#endif +} + +``` + +### bevy/crates/bevy_sprite/src/mesh2d/mesh2d_functions + +```rust +#define_import_path bevy_sprite::mesh2d_functions + +#import bevy_sprite::{ + mesh2d_view_bindings::view, + mesh2d_bindings::mesh, +} +#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} + +fn get_world_from_local(instance_index: u32) -> mat4x4 { + return affine3_to_square(mesh[instance_index].world_from_local); +} + +fn mesh2d_position_local_to_world(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + return world_from_local * vertex_position; +} + +fn mesh2d_position_world_to_clip(world_position: vec4) -> vec4 { + return view.clip_from_world * world_position; +} + +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. 
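+//
+// Composing the two transforms into a single matrix would give slightly
+// different floating-point results, which can break an 'equals' depth test
+// against positions computed via the two-step path.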
+fn mesh2d_position_local_to_clip(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh2d_position_local_to_world(world_from_local, vertex_position); + return mesh2d_position_world_to_clip(world_position); +} + +fn mesh2d_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { + return mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].local_from_world_transpose_a, + mesh[instance_index].local_from_world_transpose_b, + ) * vertex_normal; +} + +fn mesh2d_tangent_local_to_world(world_from_local: mat4x4, vertex_tangent: vec4) -> vec4 { + return vec4( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz + ) * vertex_tangent.xyz, + vertex_tangent.w + ); +} + +``` + +### bevy/crates/bevy_sprite/src/render/sprite + +```rust +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif + +#import bevy_render::{ + maths::affine3_to_square, + view::View, +} + +#import bevy_sprite::sprite_view_bindings::view + +struct VertexInput { + @builtin(vertex_index) index: u32, + // NOTE: Instance-rate vertex buffer members prefixed with i_ + // NOTE: i_model_transpose_colN are the 3 columns of a 3x4 matrix that is the transpose of the + // affine 4x3 model matrix. + @location(0) i_model_transpose_col0: vec4, + @location(1) i_model_transpose_col1: vec4, + @location(2) i_model_transpose_col2: vec4, + @location(3) i_color: vec4, + @location(4) i_uv_offset_scale: vec4, +} + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) uv: vec2, + @location(1) @interpolate(flat) color: vec4, +}; + +@vertex +fn vertex(in: VertexInput) -> VertexOutput { + var out: VertexOutput; + + let vertex_position = vec3( + f32(in.index & 0x1u), + f32((in.index & 0x2u) >> 1u), + 0.0 + ); + + out.clip_position = view.clip_from_world * affine3_to_square(mat3x4( + in.i_model_transpose_col0, + in.i_model_transpose_col1, + in.i_model_transpose_col2, + )) * vec4(vertex_position, 1.0); + out.uv = vec2(vertex_position.xy) * in.i_uv_offset_scale.zw + in.i_uv_offset_scale.xy; + out.color = in.i_color; + + return out; +} + +@group(1) @binding(0) var sprite_texture: texture_2d; +@group(1) @binding(1) var sprite_sampler: sampler; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var color = in.color * textureSample(sprite_texture, sprite_sampler, in.uv); + +#ifdef TONEMAP_IN_SHADER + color = tonemapping::tone_mapping(color, view.color_grading); +#endif + + return color; +} + +``` + +### bevy/crates/bevy_sprite/src/render/sprite_view_bindings + +```rust +#define_import_path bevy_sprite::sprite_view_bindings + +#import bevy_render::view::View + +@group(0) @binding(0) var view: View; + +@group(0) @binding(1) var dt_lut_texture: texture_3d; +@group(0) @binding(2) var dt_lut_sampler: sampler; + + +``` + +### bevy/crates/bevy_gizmos/src/line_joints + +```rust +#import bevy_render::view::View + +@group(0) @binding(0) var view: View; + + +struct LineGizmoUniform { + line_width: f32, + depth_bias: f32, + resolution: u32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. 
+ _padding: f32, +#endif +} + +@group(1) @binding(0) var joints_gizmo: LineGizmoUniform; + +struct VertexInput { + @location(0) position_a: vec3, + @location(1) position_b: vec3, + @location(2) position_c: vec3, + @location(3) color: vec4, + @builtin(vertex_index) index: u32, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, +}; + +const EPSILON: f32 = 4.88e-04; + +@vertex +fn vertex_bevel(vertex: VertexInput) -> VertexOutput { + var positions = array, 3>( + vec2(0, 0), + vec2(0, 0.5), + vec2(0.5, 0), + ); + var position = positions[vertex.index]; + + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_c); + clip_b = clip_near_plane(clip_b, clip_a); + clip_c = clip_near_plane(clip_c, clip_b); + clip_a = clip_near_plane(clip_a, clip_c); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); + + var color = vertex.color; + var line_width = joints_gizmo.line_width; + +#ifdef PERSPECTIVE + line_width /= clip_b.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let ab = normalize(screen_b - screen_a); + let cb = normalize(screen_b - screen_c); + let ab_norm = vec2(-ab.y, ab.x); + let cb_norm = vec2(cb.y, -cb.x); + let tangent = normalize(ab - cb); + let normal = vec2(-tangent.y, tangent.x); + let sigma = sign(dot(ab + cb, normal)); + + var p0 = line_width * sigma * ab_norm; + var p1 = line_width * sigma * cb_norm; + + let screen = screen_b + position.x * p0 + position.y * p1; + + let depth = depth(clip_b); + + var clip_position = vec4(clip_b.w * ((2. * screen) / resolution - 1.), depth, clip_b.w); + return VertexOutput(clip_position, color); +} + +@vertex +fn vertex_miter(vertex: VertexInput) -> VertexOutput { + var positions = array, 6>( + vec3(0, 0, 0), + vec3(0.5, 0, 0), + vec3(0, 0.5, 0), + vec3(0, 0, 0), + vec3(0, 0.5, 0), + vec3(0, 0, 0.5), + ); + var position = positions[vertex.index]; + + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_c); + clip_b = clip_near_plane(clip_b, clip_a); + clip_c = clip_near_plane(clip_c, clip_b); + clip_a = clip_near_plane(clip_a, clip_c); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); + + var color = vertex.color; + var line_width = joints_gizmo.line_width; + +#ifdef PERSPECTIVE + line_width /= clip_b.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. 
{ + color.a *= line_width; + line_width = 1.; + } + + let ab = normalize(screen_b - screen_a); + let cb = normalize(screen_b - screen_c); + let ab_norm = vec2(-ab.y, ab.x); + let cb_norm = vec2(cb.y, -cb.x); + let tangent = normalize(ab - cb); + let normal = vec2(-tangent.y, tangent.x); + let sigma = sign(dot(ab + cb, normal)); + + var p0 = line_width * sigma * ab_norm; + var p1 = line_width * sigma * normal / dot(normal, ab_norm); + var p2 = line_width * sigma * cb_norm; + + var screen = screen_b + position.x * p0 + position.y * p1 + position.z * p2; + + var depth = depth(clip_b); + + var clip_position = vec4(clip_b.w * ((2. * screen) / resolution - 1.), depth, clip_b.w); + return VertexOutput(clip_position, color); +} + +@vertex +fn vertex_round(vertex: VertexInput) -> VertexOutput { + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_c); + clip_b = clip_near_plane(clip_b, clip_a); + clip_c = clip_near_plane(clip_c, clip_b); + clip_a = clip_near_plane(clip_a, clip_c); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); + + var color = vertex.color; + var line_width = joints_gizmo.line_width; + +#ifdef PERSPECTIVE + line_width /= clip_b.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let ab = normalize(screen_b - screen_a); + let cb = normalize(screen_b - screen_c); + let ab_norm = vec2(-ab.y, ab.x); + let cb_norm = vec2(cb.y, -cb.x); + + // We render `joints_gizmo.resolution`triangles. The vertices in each triangle are ordered as follows: + // - 0: The 'center' vertex at `screen_b`. + // - 1: The vertex closer to the ab line. + // - 2: The vertex closer to the cb line. + var in_triangle_index = f32(vertex.index) % 3.0; + var tri_index = floor(f32(vertex.index) / 3.0); + var radius = sign(in_triangle_index) * 0.5 * line_width; + var theta = acos(dot(ab_norm, cb_norm)); + let sigma = sign(dot(ab_norm, cb)); + var angle = theta * (tri_index + in_triangle_index - 1) / f32(joints_gizmo.resolution); + var position_x = sigma * radius * cos(angle); + var position_y = radius * sin(angle); + + var screen = screen_b + position_x * ab_norm + position_y * ab; + + var depth = depth(clip_b); + + var clip_position = vec4(clip_b.w * ((2. * screen) / resolution - 1.), depth, clip_b.w); + return VertexOutput(clip_position, color); +} + +fn clip_near_plane(a: vec4, b: vec4) -> vec4 { + // Move a if a is behind the near plane and b is in front. + if a.z > a.w && b.z <= b.w { + // Interpolate a towards b until it's at the near plane. + let distance_a = a.z - a.w; + let distance_b = b.z - b.w; + // Add an epsilon to the interpolator to ensure that the point is + // not just behind the clip plane due to floating-point imprecision. + let t = distance_a / (distance_a - distance_b) + EPSILON; + return mix(a, b, t); + } + return a; +} + +fn depth(clip: vec4) -> f32 { + var depth: f32; + if joints_gizmo.depth_bias >= 0. { + depth = clip.z * (1. 
- joints_gizmo.depth_bias); + } else { + // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w + // and when equal to 0.0, it is exactly equal to depth. + // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 + // clip.w represents the near plane in homogeneous clip space in bevy, having a depth + // of this value means nothing can be in front of this + // The reason this uses an exponential function is that it makes it much easier for the + // user to chose a value that is convenient for them + depth = clip.z * exp2(-joints_gizmo.depth_bias * log2(clip.w / clip.z - EPSILON)); + } + return depth; +} + +struct FragmentInput { + @location(0) color: vec4, +}; + +struct FragmentOutput { + @location(0) color: vec4, +}; + +@fragment +fn fragment(in: FragmentInput) -> FragmentOutput { + // return FragmentOutput(vec4(1, 1, 1, 1)); + return FragmentOutput(in.color); +} + +``` + +### bevy/crates/bevy_gizmos/src/lines + +```rust +// TODO use common view binding +#import bevy_render::view::View + +@group(0) @binding(0) var view: View; + + +struct LineGizmoUniform { + line_width: f32, + depth_bias: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _padding: vec2, +#endif +} + +@group(1) @binding(0) var line_gizmo: LineGizmoUniform; + +struct VertexInput { + @location(0) position_a: vec3, + @location(1) position_b: vec3, + @location(2) color_a: vec4, + @location(3) color_b: vec4, + @builtin(vertex_index) index: u32, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, + @location(1) uv: f32, +}; + +const EPSILON: f32 = 4.88e-04; + +@vertex +fn vertex(vertex: VertexInput) -> VertexOutput { + var positions = array, 6>( + vec2(-0.5, 0.), + vec2(-0.5, 1.), + vec2(0.5, 1.), + vec2(-0.5, 0.), + vec2(0.5, 1.), + vec2(0.5, 0.) + ); + let position = positions[vertex.index]; + + // algorithm based on https://wwwtyro.net/2019/11/18/instanced-lines.html + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_b); + clip_b = clip_near_plane(clip_b, clip_a); + let clip = mix(clip_a, clip_b, position.y); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + + let y_basis = normalize(screen_b - screen_a); + let x_basis = vec2(-y_basis.y, y_basis.x); + + var color = mix(vertex.color_a, vertex.color_b, position.y); + + var line_width = line_gizmo.line_width; + var alpha = 1.; + + var uv: f32; +#ifdef PERSPECTIVE + line_width /= clip.w; + + // get height of near clipping plane in world space + let pos0 = view.view_from_clip * vec4(0, -1, 0, 1); // Bottom of the screen + let pos1 = view.view_from_clip * vec4(0, 1, 0, 1); // Top of the screen + let near_clipping_plane_height = length(pos0.xyz - pos1.xyz); + + // We can't use vertex.position_X because we may have changed the clip positions with clip_near_plane + let position_a = view.world_from_clip * clip_a; + let position_b = view.world_from_clip * clip_b; + let world_distance = length(position_a.xyz - position_b.xyz); + + // Offset to compensate for moved clip positions. If removed dots on lines will slide when position a is ofscreen. 
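+ // `clipped_offset` measures how far `position_a` was moved by
+ // `clip_near_plane`, so the dash pattern stays anchored to the original
+ // endpoint.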
+ let clipped_offset = length(position_a.xyz - vertex.position_a); + + uv = (clipped_offset + position.y * world_distance) * resolution.y / near_clipping_plane_height / line_gizmo.line_width; +#else + // Get the distance of b to the camera along camera axes + let camera_b = view.view_from_clip * clip_b; + + // This differentiates between orthographic and perspective cameras. + // For orthographic cameras no depth adaptment (depth_adaptment = 1) is needed. + var depth_adaptment: f32; + if (clip_b.w == 1.0) { + depth_adaptment = 1.0; + } + else { + depth_adaptment = -camera_b.z; + } + uv = position.y * depth_adaptment * length(screen_b - screen_a) / line_gizmo.line_width; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let x_offset = line_width * position.x * x_basis; + let screen = mix(screen_a, screen_b, position.y) + x_offset; + + var depth: f32; + if line_gizmo.depth_bias >= 0. { + depth = clip.z * (1. - line_gizmo.depth_bias); + } else { + // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w + // and when equal to 0.0, it is exactly equal to depth. + // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 + // clip.w represents the near plane in homogeneous clip space in bevy, having a depth + // of this value means nothing can be in front of this + // The reason this uses an exponential function is that it makes it much easier for the + // user to chose a value that is convenient for them + depth = clip.z * exp2(-line_gizmo.depth_bias * log2(clip.w / clip.z - EPSILON)); + } + + var clip_position = vec4(clip.w * ((2. * screen) / resolution - 1.), depth, clip.w); + + return VertexOutput(clip_position, color, uv); +} + +fn clip_near_plane(a: vec4, b: vec4) -> vec4 { + // Move a if a is behind the near plane and b is in front. + if a.z > a.w && b.z <= b.w { + // Interpolate a towards b until it's at the near plane. + let distance_a = a.z - a.w; + let distance_b = b.z - b.w; + // Add an epsilon to the interpolator to ensure that the point is + // not just behind the clip plane due to floating-point imprecision. + let t = distance_a / (distance_a - distance_b) + EPSILON; + return mix(a, b, t); + } + return a; +} + +struct FragmentInput { + @builtin(position) position: vec4, + @location(0) color: vec4, + @location(1) uv: f32, +}; + +struct FragmentOutput { + @location(0) color: vec4, +}; + +@fragment +fn fragment_solid(in: FragmentInput) -> FragmentOutput { + return FragmentOutput(in.color); +} +@fragment +fn fragment_dotted(in: FragmentInput) -> FragmentOutput { + var alpha: f32; +#ifdef PERSPECTIVE + alpha = 1 - floor(in.uv % 2.0); +#else + alpha = 1 - floor((in.uv * in.position.w) % 2.0); +#endif + + return FragmentOutput(vec4(in.color.xyz, in.color.w * alpha)); +} + +``` + +### bevy/crates/bevy_render/src/globals + +```rust +#define_import_path bevy_render::globals + +struct Globals { + // The time since startup in seconds + // Wraps to 0 after 1 hour. + time: f32, + // The delta time since the previous frame in seconds + delta_time: f32, + // Frame count since the start of the app. + // It wraps to zero when it reaches the maximum value of a u32. + frame_count: u32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. 
+ _webgl2_padding: f32 +#endif +}; + +``` + +### bevy/crates/bevy_render/src/maths + +```rust +#define_import_path bevy_render::maths + +const PI: f32 = 3.141592653589793; // π +const PI_2: f32 = 6.283185307179586; // 2π +const HALF_PI: f32 = 1.57079632679; // π/2 +const FRAC_PI_3: f32 = 1.0471975512; // π/3 +const E: f32 = 2.718281828459045; // exp(1) + +fn affine2_to_square(affine: mat3x2) -> mat3x3 { + return mat3x3( + vec3(affine[0].xy, 0.0), + vec3(affine[1].xy, 0.0), + vec3(affine[2].xy, 1.0), + ); +} + +fn affine3_to_square(affine: mat3x4) -> mat4x4 { + return transpose(mat4x4( + affine[0], + affine[1], + affine[2], + vec4(0.0, 0.0, 0.0, 1.0), + )); +} + +fn mat2x4_f32_to_mat3x3_unpack( + a: mat2x4, + b: f32, +) -> mat3x3 { + return mat3x3( + a[0].xyz, + vec3(a[0].w, a[1].xy), + vec3(a[1].zw, b), + ); +} + +// Extracts the square portion of an affine matrix: i.e. discards the +// translation. +fn affine3_to_mat3x3(affine: mat4x3) -> mat3x3 { + return mat3x3(affine[0].xyz, affine[1].xyz, affine[2].xyz); +} + +// Returns the inverse of a 3x3 matrix. +fn inverse_mat3x3(matrix: mat3x3) -> mat3x3 { + let tmp0 = cross(matrix[1], matrix[2]); + let tmp1 = cross(matrix[2], matrix[0]); + let tmp2 = cross(matrix[0], matrix[1]); + let inv_det = 1.0 / dot(matrix[2], tmp2); + return transpose(mat3x3(tmp0 * inv_det, tmp1 * inv_det, tmp2 * inv_det)); +} + +// Returns the inverse of an affine matrix. +// +// https://en.wikipedia.org/wiki/Affine_transformation#Groups +fn inverse_affine3(affine: mat4x3) -> mat4x3 { + let matrix3 = affine3_to_mat3x3(affine); + let inv_matrix3 = inverse_mat3x3(matrix3); + return mat4x3(inv_matrix3[0], inv_matrix3[1], inv_matrix3[2], -(inv_matrix3 * affine[3])); +} + +// Extracts the upper 3x3 portion of a 4x4 matrix. +fn mat4x4_to_mat3x3(m: mat4x4) -> mat3x3 { + return mat3x3(m[0].xyz, m[1].xyz, m[2].xyz); +} + +// Creates an orthonormal basis given a Z vector and an up vector (which becomes +// Y after orthonormalization). +// +// The results are equivalent to the Gram-Schmidt process [1]. +// +// [1]: https://math.stackexchange.com/a/1849294 +fn orthonormalize(z_unnormalized: vec3, up: vec3) -> mat3x3 { + let z_basis = normalize(z_unnormalized); + let x_basis = normalize(cross(z_basis, up)); + let y_basis = cross(z_basis, x_basis); + return mat3x3(x_basis, y_basis, z_basis); +} + +// Returns true if any part of a sphere is on the positive side of a plane. +// +// `sphere_center.w` should be 1.0. +// +// This is used for frustum culling. +fn sphere_intersects_plane_half_space( + plane: vec4, + sphere_center: vec4, + sphere_radius: f32 +) -> bool { + return dot(plane, sphere_center) + sphere_radius > 0.0; +} + +// pow() but safe for NaNs/negatives +fn powsafe(color: vec3, power: f32) -> vec3 { + return pow(abs(color), vec3(power)) * sign(color); +} + +``` + +### bevy/crates/bevy_render/src/color_operations + +```rust +#define_import_path bevy_render::color_operations + +#import bevy_render::maths::FRAC_PI_3 + +// Converts HSV to RGB. +// +// Input: H ∈ [0, 2π), S ∈ [0, 1], V ∈ [0, 1]. +// Output: R ∈ [0, 1], G ∈ [0, 1], B ∈ [0, 1]. +// +// +fn hsv_to_rgb(hsv: vec3) -> vec3 { + let n = vec3(5.0, 3.0, 1.0); + let k = (n + hsv.x / FRAC_PI_3) % 6.0; + return hsv.z - hsv.z * hsv.y * max(vec3(0.0), min(k, min(4.0 - k, vec3(1.0)))); +} + +// Converts RGB to HSV. +// +// Input: R ∈ [0, 1], G ∈ [0, 1], B ∈ [0, 1]. +// Output: H ∈ [0, 2π), S ∈ [0, 1], V ∈ [0, 1]. +// +// +fn rgb_to_hsv(rgb: vec3) -> vec3 { + let x_max = max(rgb.r, max(rgb.g, rgb.b)); // i.e. 
V + let x_min = min(rgb.r, min(rgb.g, rgb.b)); + let c = x_max - x_min; // chroma + + var swizzle = vec3(0.0); + if (x_max == rgb.r) { + swizzle = vec3(rgb.gb, 0.0); + } else if (x_max == rgb.g) { + swizzle = vec3(rgb.br, 2.0); + } else { + swizzle = vec3(rgb.rg, 4.0); + } + + let h = FRAC_PI_3 * (((swizzle.x - swizzle.y) / c + swizzle.z) % 6.0); + + // Avoid division by zero. + var s = 0.0; + if (x_max > 0.0) { + s = c / x_max; + } + + return vec3(h, s, x_max); +} + + +``` + +### bevy/crates/bevy_render/src/view/view + +```rust +#define_import_path bevy_render::view + +struct ColorGrading { + balance: mat3x3, + saturation: vec3, + contrast: vec3, + gamma: vec3, + gain: vec3, + lift: vec3, + midtone_range: vec2, + exposure: f32, + hue: f32, + post_saturation: f32, +} + +struct View { + clip_from_world: mat4x4, + unjittered_clip_from_world: mat4x4, + world_from_clip: mat4x4, + world_from_view: mat4x4, + view_from_world: mat4x4, + clip_from_view: mat4x4, + view_from_clip: mat4x4, + world_position: vec3, + exposure: f32, + // viewport(x_origin, y_origin, width, height) + viewport: vec4, + frustum: array, 6>, + color_grading: ColorGrading, + mip_bias: f32, +}; + +``` + +### bevy/crates/bevy_render/src/view/window/screenshot + +```rust +// This vertex shader will create a triangle that will cover the entire screen +// with minimal effort, avoiding the need for a vertex buffer etc. +@vertex +fn vs_main(@builtin(vertex_index) in_vertex_index: u32) -> @builtin(position) vec4 { + let x = f32((in_vertex_index & 1u) << 2u); + let y = f32((in_vertex_index & 2u) << 1u); + return vec4(x - 1.0, y - 1.0, 0.0, 1.0); +} + +@group(0) @binding(0) var t: texture_2d; + +@fragment +fn fs_main(@builtin(position) pos: vec4) -> @location(0) vec4 { + let coords = floor(pos.xy); + return textureLoad(t, vec2(coords), 0i); +} + +``` + +### bevy_shaders/cull_clusters + +```rust +#import bevy_pbr::meshlet_bindings::{ + meshlet_cluster_meshlet_ids, + meshlet_bounding_spheres, + meshlet_cluster_instance_ids, + meshlet_instance_uniforms, + meshlet_second_pass_candidates, + depth_pyramid, + view, + previous_view, + should_cull_instance, + cluster_is_second_pass_candidate, + meshlets, + draw_indirect_args, + draw_triangle_buffer, +} +#import bevy_render::maths::affine3_to_square + +/// Culls individual clusters (1 per thread) in two passes (two pass occlusion culling), and outputs a bitmask of which clusters survived. +/// 1. The first pass tests instance visibility, frustum culling, LOD selection, and finally occlusion culling using last frame's depth pyramid. +/// 2. The second pass performs occlusion culling (using the depth buffer generated from the first pass) on all clusters that passed +/// the instance, frustum, and LOD tests in the first pass, but were not visible last frame according to the occlusion culling. 
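+///
+/// Thread-to-cluster mapping: each thread culls one cluster. The 3D workgroup
+/// grid is flattened into a linear workgroup index, which is combined with the
+/// local invocation index (workgroup size 128) to form the cluster id that is
+/// bounds-checked against `meshlet_cluster_meshlet_ids` below.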
+ +@compute +@workgroup_size(128, 1, 1) // 128 threads per workgroup, 1 cluster per thread +fn cull_clusters( + @builtin(workgroup_id) workgroup_id: vec3, + @builtin(num_workgroups) num_workgroups: vec3, + @builtin(local_invocation_id) local_invocation_id: vec3, +) { + // Calculate the cluster ID for this thread + let cluster_id = local_invocation_id.x + 128u * dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x, num_workgroups.x, 1u)); + if cluster_id >= arrayLength(&meshlet_cluster_meshlet_ids) { return; } + +#ifdef MESHLET_SECOND_CULLING_PASS + if !cluster_is_second_pass_candidate(cluster_id) { return; } +#endif + + // Check for instance culling + let instance_id = meshlet_cluster_instance_ids[cluster_id]; +#ifdef MESHLET_FIRST_CULLING_PASS + if should_cull_instance(instance_id) { return; } +#endif + + // Calculate world-space culling bounding sphere for the cluster + let instance_uniform = meshlet_instance_uniforms[instance_id]; + let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; + let world_from_local = affine3_to_square(instance_uniform.world_from_local); + let world_scale = max(length(world_from_local[0]), max(length(world_from_local[1]), length(world_from_local[2]))); + let bounding_spheres = meshlet_bounding_spheres[meshlet_id]; + var culling_bounding_sphere_center = world_from_local * vec4(bounding_spheres.self_culling.center, 1.0); + var culling_bounding_sphere_radius = world_scale * bounding_spheres.self_culling.radius; + +#ifdef MESHLET_FIRST_CULLING_PASS + // Frustum culling + // TODO: Faster method from https://vkguide.dev/docs/gpudriven/compute_culling/#frustum-culling-function + for (var i = 0u; i < 6u; i++) { + if dot(view.frustum[i], culling_bounding_sphere_center) + culling_bounding_sphere_radius <= 0.0 { + return; + } + } + + // Calculate view-space LOD bounding sphere for the meshlet + let lod_bounding_sphere_center = world_from_local * vec4(bounding_spheres.self_lod.center, 1.0); + let lod_bounding_sphere_radius = world_scale * bounding_spheres.self_lod.radius; + let lod_bounding_sphere_center_view_space = (view.view_from_world * vec4(lod_bounding_sphere_center.xyz, 1.0)).xyz; + + // Calculate view-space LOD bounding sphere for the meshlet's parent + let parent_lod_bounding_sphere_center = world_from_local * vec4(bounding_spheres.parent_lod.center, 1.0); + let parent_lod_bounding_sphere_radius = world_scale * bounding_spheres.parent_lod.radius; + let parent_lod_bounding_sphere_center_view_space = (view.view_from_world * vec4(parent_lod_bounding_sphere_center.xyz, 1.0)).xyz; + + // Check LOD cut (meshlet error imperceptible, and parent error not imperceptible) + let lod_is_ok = lod_error_is_imperceptible(lod_bounding_sphere_center_view_space, lod_bounding_sphere_radius); + let parent_lod_is_ok = lod_error_is_imperceptible(parent_lod_bounding_sphere_center_view_space, parent_lod_bounding_sphere_radius); + if !lod_is_ok || parent_lod_is_ok { return; } +#endif + + // Project the culling bounding sphere to view-space for occlusion culling +#ifdef MESHLET_FIRST_CULLING_PASS + let previous_world_from_local = affine3_to_square(instance_uniform.previous_world_from_local); + let previous_world_from_local_scale = max(length(previous_world_from_local[0]), max(length(previous_world_from_local[1]), length(previous_world_from_local[2]))); + culling_bounding_sphere_center = previous_world_from_local * vec4(bounding_spheres.self_culling.center, 1.0); + culling_bounding_sphere_radius = previous_world_from_local_scale * bounding_spheres.self_culling.radius; +#endif + let 
culling_bounding_sphere_center_view_space = (view.view_from_world * vec4(culling_bounding_sphere_center.xyz, 1.0)).xyz; + + let aabb = project_view_space_sphere_to_screen_space_aabb(culling_bounding_sphere_center_view_space, culling_bounding_sphere_radius); + let depth_pyramid_size_mip_0 = vec2(textureDimensions(depth_pyramid, 0)); + let width = (aabb.z - aabb.x) * depth_pyramid_size_mip_0.x; + let height = (aabb.w - aabb.y) * depth_pyramid_size_mip_0.y; + let depth_level = max(0, i32(ceil(log2(max(width, height))))); // TODO: Naga doesn't like this being a u32 + let depth_pyramid_size = vec2(textureDimensions(depth_pyramid, depth_level)); + let aabb_top_left = vec2(aabb.xy * depth_pyramid_size); + + let depth_quad_a = textureLoad(depth_pyramid, aabb_top_left, depth_level).x; + let depth_quad_b = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 0u), depth_level).x; + let depth_quad_c = textureLoad(depth_pyramid, aabb_top_left + vec2(0u, 1u), depth_level).x; + let depth_quad_d = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 1u), depth_level).x; + let occluder_depth = min(min(depth_quad_a, depth_quad_b), min(depth_quad_c, depth_quad_d)); + + // Check whether or not the cluster would be occluded if drawn + var cluster_visible: bool; + if view.clip_from_view[3][3] == 1.0 { + // Orthographic + let sphere_depth = view.clip_from_view[3][2] + (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius) * view.clip_from_view[2][2]; + cluster_visible = sphere_depth >= occluder_depth; + } else { + // Perspective + let sphere_depth = -view.clip_from_view[3][2] / (culling_bounding_sphere_center_view_space.z + culling_bounding_sphere_radius); + cluster_visible = sphere_depth >= occluder_depth; + } + + // Write if the cluster should be occlusion tested in the second pass +#ifdef MESHLET_FIRST_CULLING_PASS + if !cluster_visible { + let bit = 1u << cluster_id % 32u; + atomicOr(&meshlet_second_pass_candidates[cluster_id / 32u], bit); + } +#endif + + // Append a list of this cluster's triangles to draw if not culled + if cluster_visible { + let meshlet_triangle_count = meshlets[meshlet_id].triangle_count; + let buffer_start = atomicAdd(&draw_indirect_args.vertex_count, meshlet_triangle_count * 3u) / 3u; + let cluster_id_packed = cluster_id << 6u; + for (var triangle_id = 0u; triangle_id < meshlet_triangle_count; triangle_id++) { + draw_triangle_buffer[buffer_start + triangle_id] = cluster_id_packed | triangle_id; + } + } +} + +// https://stackoverflow.com/questions/21648630/radius-of-projected-sphere-in-screen-space/21649403#21649403 +fn lod_error_is_imperceptible(cp: vec3, r: f32) -> bool { + let d2 = dot(cp, cp); + let r2 = r * r; + let sphere_diameter_uv = view.clip_from_view[0][0] * r / sqrt(d2 - r2); + let view_size = f32(max(view.viewport.z, view.viewport.w)); + let sphere_diameter_pixels = sphere_diameter_uv * view_size; + return sphere_diameter_pixels < 1.0; +} + +// https://zeux.io/2023/01/12/approximate-projected-bounds +fn project_view_space_sphere_to_screen_space_aabb(cp: vec3, r: f32) -> vec4 { + let inv_width = view.clip_from_view[0][0] * 0.5; + let inv_height = view.clip_from_view[1][1] * 0.5; + if view.clip_from_view[3][3] == 1.0 { + // Orthographic + let min_x = cp.x - r; + let max_x = cp.x + r; + + let min_y = cp.y - r; + let max_y = cp.y + r; + + return vec4(min_x * inv_width, 1.0 - max_y * inv_height, max_x * inv_width, 1.0 - min_y * inv_height); + } else { + // Perspective + let c = vec3(cp.xy, -cp.z); + let cr = c * r; + let czr2 = c.z * c.z - r * r; + + let 
vx = sqrt(c.x * c.x + czr2); + let min_x = (vx * c.x - cr.z) / (vx * c.z + cr.x); + let max_x = (vx * c.x + cr.z) / (vx * c.z - cr.x); + + let vy = sqrt(c.y * c.y + czr2); + let min_y = (vy * c.y - cr.z) / (vy * c.z + cr.y); + let max_y = (vy * c.y + cr.z) / (vy * c.z - cr.y); + + return vec4(min_x * inv_width, -max_y * inv_height, max_x * inv_width, -min_y * inv_height) + vec4(0.5); + } +} + +``` + +### bevy_shaders/smaa + +```rust +/** + * Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com) + * Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com) + * Copyright (C) 2013 Belen Masia (bmasia@unizar.es) + * Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com) + * Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished to + * do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. As clarification, there + * is no requirement that the copyright notice and permission be included in + * binary distributions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * _______ ___ ___ ___ ___ + * / || \/ | / \ / \ + * | (---- | \ / | / ^ \ / ^ \ + * \ \ | |\/| | / /_\ \ / /_\ \ + * ----) | | | | | / _____ \ / _____ \ + * |_______/ |__| |__| /__/ \__\ /__/ \__\ + * + * E N H A N C E D + * S U B P I X E L M O R P H O L O G I C A L A N T I A L I A S I N G + * + * http://www.iryoku.com/smaa/ + * + * Hi, welcome aboard! + * + * Here you'll find instructions to get the shader up and running as fast as + * possible. + * + * IMPORTANTE NOTICE: when updating, remember to update both this file and the + * precomputed textures! They may change from version to version. + * + * The shader has three passes, chained together as follows: + * + * |input|------------------� + * v | + * [ SMAA*EdgeDetection ] | + * v | + * |edgesTex| | + * v | + * [ SMAABlendingWeightCalculation ] | + * v | + * |blendTex| | + * v | + * [ SMAANeighborhoodBlending ] <------� + * v + * |output| + * + * Note that each [pass] has its own vertex and pixel shader. Remember to use + * oversized triangles instead of quads to avoid overshading along the + * diagonal. + * + * You've three edge detection methods to choose from: luma, color or depth. + * They represent different quality/performance and anti-aliasing/sharpness + * tradeoffs, so our recommendation is for you to choose the one that best + * suits your particular scenario: + * + * - Depth edge detection is usually the fastest but it may miss some edges. 
+ * + * - Luma edge detection is usually more expensive than depth edge detection, + * but catches visible edges that depth edge detection can miss. + * + * - Color edge detection is usually the most expensive one but catches + * chroma-only edges. + * + * For quickstarters: just use luma edge detection. + * + * The general advice is to not rush the integration process and ensure each + * step is done correctly (don't try to integrate SMAA T2x with predicated edge + * detection from the start!). Ok then, let's go! + * + * 1. The first step is to create two RGBA temporal render targets for holding + * |edgesTex| and |blendTex|. + * + * In DX10 or DX11, you can use a RG render target for the edges texture. + * In the case of NVIDIA GPUs, using RG render targets seems to actually be + * slower. + * + * On the Xbox 360, you can use the same render target for resolving both + * |edgesTex| and |blendTex|, as they aren't needed simultaneously. + * + * 2. Both temporal render targets |edgesTex| and |blendTex| must be cleared + * each frame. Do not forget to clear the alpha channel! + * + * 3. The next step is loading the two supporting precalculated textures, + * 'areaTex' and 'searchTex'. You'll find them in the 'Textures' folder as + * C++ headers, and also as regular DDS files. They'll be needed for the + * 'SMAABlendingWeightCalculation' pass. + * + * If you use the C++ headers, be sure to load them in the format specified + * inside of them. + * + * You can also compress 'areaTex' and 'searchTex' using BC5 and BC4 + * respectively, if you have that option in your content processor pipeline. + * When compressing then, you get a non-perceptible quality decrease, and a + * marginal performance increase. + * + * 4. All samplers must be set to linear filtering and clamp. + * + * After you get the technique working, remember that 64-bit inputs have + * half-rate linear filtering on GCN. + * + * If SMAA is applied to 64-bit color buffers, switching to point filtering + * when accessing them will increase the performance. Search for + * 'SMAASamplePoint' to see which textures may benefit from point + * filtering, and where (which is basically the color input in the edge + * detection and resolve passes). + * + * 5. All texture reads and buffer writes must be non-sRGB, with the exception + * of the input read and the output write in + * 'SMAANeighborhoodBlending' (and only in this pass!). If sRGB reads in + * this last pass are not possible, the technique will work anyway, but + * will perform antialiasing in gamma space. + * + * IMPORTANT: for best results the input read for the color/luma edge + * detection should *NOT* be sRGB. + * + * 6. Before including SMAA.h you'll have to setup the render target metrics, + * the target and any optional configuration defines. Optionally you can + * use a preset. + * + * You have the following targets available: + * SMAA_HLSL_3 + * SMAA_HLSL_4 + * SMAA_HLSL_4_1 + * SMAA_GLSL_3 * + * SMAA_GLSL_4 * + * + * * (See SMAA_INCLUDE_VS and SMAA_INCLUDE_PS below). + * + * And four presets: + * SMAA_PRESET_LOW (%60 of the quality) + * SMAA_PRESET_MEDIUM (%80 of the quality) + * SMAA_PRESET_HIGH (%95 of the quality) + * SMAA_PRESET_ULTRA (%99 of the quality) + * + * For example: + * #define SMAA_RT_METRICS float4(1.0 / 1280.0, 1.0 / 720.0, 1280.0, 720.0) + * #define SMAA_HLSL_4 + * #define SMAA_PRESET_HIGH + * #include "SMAA.h" + * + * Note that SMAA_RT_METRICS doesn't need to be a macro, it can be a + * uniform variable. 
The code is designed to minimize the impact of not + * using a constant value, but it is still better to hardcode it. + * + * Depending on how you encoded 'areaTex' and 'searchTex', you may have to + * add (and customize) the following defines before including SMAA.h: + * #define SMAA_AREATEX_SELECT(sample) sample.rg + * #define SMAA_SEARCHTEX_SELECT(sample) sample.r + * + * If your engine is already using porting macros, you can define + * SMAA_CUSTOM_SL, and define the porting functions by yourself. + * + * 7. Then, you'll have to setup the passes as indicated in the scheme above. + * You can take a look into SMAA.fx, to see how we did it for our demo. + * Checkout the function wrappers, you may want to copy-paste them! + * + * 8. It's recommended to validate the produced |edgesTex| and |blendTex|. + * You can use a screenshot from your engine to compare the |edgesTex| + * and |blendTex| produced inside of the engine with the results obtained + * with the reference demo. + * + * 9. After you get the last pass to work, it's time to optimize. You'll have + * to initialize a stencil buffer in the first pass (discard is already in + * the code), then mask execution by using it the second pass. The last + * pass should be executed in all pixels. + * + * + * After this point you can choose to enable predicated thresholding, + * temporal supersampling and motion blur integration: + * + * a) If you want to use predicated thresholding, take a look into + * SMAA_PREDICATION; you'll need to pass an extra texture in the edge + * detection pass. + * + * b) If you want to enable temporal supersampling (SMAA T2x): + * + * 1. The first step is to render using subpixel jitters. I won't go into + * detail, but it's as simple as moving each vertex position in the + * vertex shader, you can check how we do it in our DX10 demo. + * + * 2. Then, you must setup the temporal resolve. You may want to take a look + * into SMAAResolve for resolving 2x modes. After you get it working, you'll + * probably see ghosting everywhere. But fear not, you can enable the + * CryENGINE temporal reprojection by setting the SMAA_REPROJECTION macro. + * Check out SMAA_DECODE_VELOCITY if your velocity buffer is encoded. + * + * 3. The next step is to apply SMAA to each subpixel jittered frame, just as + * done for 1x. + * + * 4. At this point you should already have something usable, but for best + * results the proper area textures must be set depending on current jitter. + * For this, the parameter 'subsampleIndices' of + * 'SMAABlendingWeightCalculationPS' must be set as follows, for our T2x + * mode: + * + * @SUBSAMPLE_INDICES + * + * | S# | Camera Jitter | subsampleIndices | + * +----+------------------+---------------------+ + * | 0 | ( 0.25, -0.25) | float4(1, 1, 1, 0) | + * | 1 | (-0.25, 0.25) | float4(2, 2, 2, 0) | + * + * These jitter positions assume a bottom-to-top y axis. S# stands for the + * sample number. + * + * More information about temporal supersampling here: + * http://iryoku.com/aacourse/downloads/13-Anti-Aliasing-Methods-in-CryENGINE-3.pdf + * + * c) If you want to enable spatial multisampling (SMAA S2x): + * + * 1. The scene must be rendered using MSAA 2x. The MSAA 2x buffer must be + * created with: + * - DX10: see below (*) + * - DX10.1: D3D10_STANDARD_MULTISAMPLE_PATTERN or + * - DX11: D3D11_STANDARD_MULTISAMPLE_PATTERN + * + * This allows to ensure that the subsample order matches the table in + * @SUBSAMPLE_INDICES. 
+ * + * (*) In the case of DX10, we refer the reader to: + * - SMAA::detectMSAAOrder and + * - SMAA::msaaReorder + * + * These functions allow to match the standard multisample patterns by + * detecting the subsample order for a specific GPU, and reordering + * them appropriately. + * + * 2. A shader must be run to output each subsample into a separate buffer + * (DX10 is required). You can use SMAASeparate for this purpose, or just do + * it in an existing pass (for example, in the tone mapping pass, which has + * the advantage of feeding tone mapped subsamples to SMAA, which will yield + * better results). + * + * 3. The full SMAA 1x pipeline must be run for each separated buffer, storing + * the results in the final buffer. The second run should alpha blend with + * the existing final buffer using a blending factor of 0.5. + * 'subsampleIndices' must be adjusted as in the SMAA T2x case (see point + * b). + * + * d) If you want to enable temporal supersampling on top of SMAA S2x + * (which actually is SMAA 4x): + * + * 1. SMAA 4x consists on temporally jittering SMAA S2x, so the first step is + * to calculate SMAA S2x for current frame. In this case, 'subsampleIndices' + * must be set as follows: + * + * | F# | S# | Camera Jitter | Net Jitter | subsampleIndices | + * +----+----+--------------------+-------------------+----------------------+ + * | 0 | 0 | ( 0.125, 0.125) | ( 0.375, -0.125) | float4(5, 3, 1, 3) | + * | 0 | 1 | ( 0.125, 0.125) | (-0.125, 0.375) | float4(4, 6, 2, 3) | + * +----+----+--------------------+-------------------+----------------------+ + * | 1 | 2 | (-0.125, -0.125) | ( 0.125, -0.375) | float4(3, 5, 1, 4) | + * | 1 | 3 | (-0.125, -0.125) | (-0.375, 0.125) | float4(6, 4, 2, 4) | + * + * These jitter positions assume a bottom-to-top y axis. F# stands for the + * frame number. S# stands for the sample number. + * + * 2. After calculating SMAA S2x for current frame (with the new subsample + * indices), previous frame must be reprojected as in SMAA T2x mode (see + * point b). + * + * e) If motion blur is used, you may want to do the edge detection pass + * together with motion blur. This has two advantages: + * + * 1. Pixels under heavy motion can be omitted from the edge detection process. + * For these pixels we can just store "no edge", as motion blur will take + * care of them. + * 2. The center pixel tap is reused. + * + * Note that in this case depth testing should be used instead of stenciling, + * as we have to write all the pixels in the motion blur pass. + * + * That's it! 
+ */ + +struct SmaaInfo { + rt_metrics: vec4, +} + +struct VertexVaryings { + clip_coord: vec2, + tex_coord: vec2, +} + +struct EdgeDetectionVaryings { + @builtin(position) position: vec4, + @location(0) offset_0: vec4, + @location(1) offset_1: vec4, + @location(2) offset_2: vec4, + @location(3) tex_coord: vec2, +} + +struct BlendingWeightCalculationVaryings { + @builtin(position) position: vec4, + @location(0) offset_0: vec4, + @location(1) offset_1: vec4, + @location(2) offset_2: vec4, + @location(3) tex_coord: vec2, +} + +struct NeighborhoodBlendingVaryings { + @builtin(position) position: vec4, + @location(0) offset: vec4, + @location(1) tex_coord: vec2, +} + +@group(0) @binding(0) var color_texture: texture_2d; +@group(0) @binding(1) var smaa_info: SmaaInfo; + +#ifdef SMAA_EDGE_DETECTION +@group(1) @binding(0) var color_sampler: sampler; +#endif // SMAA_EDGE_DETECTION + +#ifdef SMAA_BLENDING_WEIGHT_CALCULATION +@group(1) @binding(0) var edges_texture: texture_2d; +@group(1) @binding(1) var edges_sampler: sampler; +@group(1) @binding(2) var search_texture: texture_2d; +@group(1) @binding(3) var area_texture: texture_2d; +#endif // SMAA_BLENDING_WEIGHT_CALCULATION + +#ifdef SMAA_NEIGHBORHOOD_BLENDING +@group(1) @binding(0) var blend_texture: texture_2d; +@group(1) @binding(1) var blend_sampler: sampler; +#endif // SMAA_NEIGHBORHOOD_BLENDING + +//----------------------------------------------------------------------------- +// SMAA Presets + +#ifdef SMAA_PRESET_LOW +const SMAA_THRESHOLD: f32 = 0.15; +const SMAA_MAX_SEARCH_STEPS: u32 = 4u; +#define SMAA_DISABLE_DIAG_DETECTION +#define SMAA_DISABLE_CORNER_DETECTION +#else ifdef SMAA_PRESET_MEDIUM // SMAA_PRESET_LOW +const SMAA_THRESHOLD: f32 = 0.1; +const SMAA_MAX_SEARCH_STEPS: u32 = 8u; +#define SMAA_DISABLE_DIAG_DETECTION +#define SMAA_DISABLE_CORNER_DETECTION +#else ifdef SMAA_PRESET_HIGH // SMAA_PRESET_MEDIUM +const SMAA_THRESHOLD: f32 = 0.1; +const SMAA_MAX_SEARCH_STEPS: u32 = 16u; +const SMAA_MAX_SEARCH_STEPS_DIAG: u32 = 8u; +const SMAA_CORNER_ROUNDING: u32 = 25u; +#else ifdef SMAA_PRESET_ULTRA // SMAA_PRESET_HIGH +const SMAA_THRESHOLD: f32 = 0.05; +const SMAA_MAX_SEARCH_STEPS: u32 = 32u; +const SMAA_MAX_SEARCH_STEPS_DIAG: u32 = 16u; +const SMAA_CORNER_ROUNDING: u32 = 25u; +#else // SMAA_PRESET_ULTRA +const SMAA_THRESHOLD: f32 = 0.1; +const SMAA_MAX_SEARCH_STEPS: u32 = 16u; +const SMAA_MAX_SEARCH_STEPS_DIAG: u32 = 8u; +const SMAA_CORNER_ROUNDING: u32 = 25u; +#endif // SMAA_PRESET_ULTRA + +//----------------------------------------------------------------------------- +// Configurable Defines + +/** + * SMAA_THRESHOLD specifies the threshold or sensitivity to edges. + * Lowering this value you will be able to detect more edges at the expense of + * performance. + * + * Range: [0, 0.5] + * 0.1 is a reasonable value, and allows to catch most visible edges. + * 0.05 is a rather overkill value, that allows to catch 'em all. + * + * If temporal supersampling is used, 0.2 could be a reasonable value, as low + * contrast edges are properly filtered by just 2x. + */ +// (In the WGSL version of this shader, `SMAA_THRESHOLD` is set above, in "SMAA +// Presets".) + +/** + * SMAA_MAX_SEARCH_STEPS specifies the maximum steps performed in the + * horizontal/vertical pattern searches, at each side of the pixel. + * + * In number of pixels, it's actually the double. So the maximum line length + * perfectly handled by, for example 16, is 64 (by perfectly, we meant that + * longer lines won't look as good, but still antialiased). 
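+ * (The horizontal/vertical searches below advance two pixels per step and
+ * run on both sides of the pixel, which is where the figure of 64 above
+ * comes from: 2 px * 16 steps * 2 sides = 64.)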
+ * + * Range: [0, 112] + */ +// (In the WGSL version of this shader, `SMAA_MAX_SEARCH_STEPS` is set above, in +// "SMAA Presets".) + +/** + * SMAA_MAX_SEARCH_STEPS_DIAG specifies the maximum steps performed in the + * diagonal pattern searches, at each side of the pixel. In this case we jump + * one pixel at time, instead of two. + * + * Range: [0, 20] + * + * On high-end machines it is cheap (between a 0.8x and 0.9x slower for 16 + * steps), but it can have a significant impact on older machines. + * + * Define SMAA_DISABLE_DIAG_DETECTION to disable diagonal processing. + */ +// (In the WGSL version of this shader, `SMAA_MAX_SEARCH_STEPS_DIAG` is set +// above, in "SMAA Presets".) + +/** + * SMAA_CORNER_ROUNDING specifies how much sharp corners will be rounded. + * + * Range: [0, 100] + * + * Define SMAA_DISABLE_CORNER_DETECTION to disable corner processing. + */ +// (In the WGSL version of this shader, `SMAA_CORNER_ROUNDING` is set above, in +// "SMAA Presets".) + +/** + * If there is an neighbor edge that has SMAA_LOCAL_CONTRAST_FACTOR times + * bigger contrast than current edge, current edge will be discarded. + * + * This allows to eliminate spurious crossing edges, and is based on the fact + * that, if there is too much contrast in a direction, that will hide + * perceptually contrast in the other neighbors. + */ +const SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR: f32 = 2.0; + +//----------------------------------------------------------------------------- +// Non-Configurable Defines + +const SMAA_AREATEX_MAX_DISTANCE: f32 = 16.0; +const SMAA_AREATEX_MAX_DISTANCE_DIAG: f32 = 20.0; +const SMAA_AREATEX_PIXEL_SIZE: vec2 = (1.0 / vec2(160.0, 560.0)); +const SMAA_AREATEX_SUBTEX_SIZE: f32 = (1.0 / 7.0); +const SMAA_SEARCHTEX_SIZE: vec2 = vec2(66.0, 33.0); +const SMAA_SEARCHTEX_PACKED_SIZE: vec2 = vec2(64.0, 16.0); + +#ifndef SMAA_DISABLE_CORNER_DETECTION +const SMAA_CORNER_ROUNDING_NORM: f32 = f32(SMAA_CORNER_ROUNDING) / 100.0; +#endif // SMAA_DISABLE_CORNER_DETECTION + +//----------------------------------------------------------------------------- +// WGSL-Specific Functions + +// This vertex shader produces the following, when drawn using indices 0..3: +// +// 1 | 0-----x.....2 +// 0 | | s | . ´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | 1´ +// +--------------- +// -1 0 1 2 3 +// +// The axes are clip-space x and y. The region marked s is the visible region. +// The digits in the corners of the right-angled triangle are the vertex +// indices. +// +// The top-left has UV 0,0, the bottom-left has 0,2, and the top-right has 2,0. +// This means that the UV gets interpolated to 1,1 at the bottom-right corner +// of the clip-space rectangle that is at 1,-1 in clip space. 
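+//
+// Working the code below through each vertex index:
+//   index 0: uv = (0, 0) -> clip = (-1,  1)
+//   index 1: uv = (0, 2) -> clip = (-1, -3)
+//   index 2: uv = (2, 0) -> clip = ( 3,  1)
+// which matches the triangle in the diagram above.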
+fn calculate_vertex_varyings(vertex_index: u32) -> VertexVaryings { + // See the explanation above for how this works + let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; + let clip_position = vec2(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0)); + + return VertexVaryings(clip_position, uv); +} + +//----------------------------------------------------------------------------- +// Vertex Shaders + +#ifdef SMAA_EDGE_DETECTION + +/** + * Edge Detection Vertex Shader + */ +@vertex +fn edge_detection_vertex_main(@builtin(vertex_index) vertex_index: u32) -> EdgeDetectionVaryings { + let varyings = calculate_vertex_varyings(vertex_index); + + var edge_detection_varyings = EdgeDetectionVaryings(); + edge_detection_varyings.position = vec4(varyings.clip_coord, 0.0, 1.0); + edge_detection_varyings.tex_coord = varyings.tex_coord; + + edge_detection_varyings.offset_0 = smaa_info.rt_metrics.xyxy * vec4(-1.0, 0.0, 0.0, -1.0) + + varyings.tex_coord.xyxy; + edge_detection_varyings.offset_1 = smaa_info.rt_metrics.xyxy * vec4(1.0, 0.0, 0.0, 1.0) + + varyings.tex_coord.xyxy; + edge_detection_varyings.offset_2 = smaa_info.rt_metrics.xyxy * vec4(-2.0, 0.0, 0.0, -2.0) + + varyings.tex_coord.xyxy; + + return edge_detection_varyings; +} + +#endif // SMAA_EDGE_DETECTION + +#ifdef SMAA_BLENDING_WEIGHT_CALCULATION + +/** + * Blend Weight Calculation Vertex Shader + */ +@vertex +fn blending_weight_calculation_vertex_main(@builtin(vertex_index) vertex_index: u32) + -> BlendingWeightCalculationVaryings { + let varyings = calculate_vertex_varyings(vertex_index); + + var weight_varyings = BlendingWeightCalculationVaryings(); + weight_varyings.position = vec4(varyings.clip_coord, 0.0, 1.0); + weight_varyings.tex_coord = varyings.tex_coord; + + // We will use these offsets for the searches later on (see @PSEUDO_GATHER4): + weight_varyings.offset_0 = smaa_info.rt_metrics.xyxy * vec4(-0.25, -0.125, 1.25, -0.125) + + varyings.tex_coord.xyxy; + weight_varyings.offset_1 = smaa_info.rt_metrics.xyxy * vec4(-0.125, -0.25, -0.125, 1.25) + + varyings.tex_coord.xyxy; + + // And these for the searches, they indicate the ends of the loops: + weight_varyings.offset_2 = + smaa_info.rt_metrics.xxyy * vec4(-2.0, 2.0, -2.0, 2.0) * f32(SMAA_MAX_SEARCH_STEPS) + + vec4(weight_varyings.offset_0.xz, weight_varyings.offset_1.yw); + + return weight_varyings; +} + +#endif // SMAA_BLENDING_WEIGHT_CALCULATION + +#ifdef SMAA_NEIGHBORHOOD_BLENDING + +/** + * Neighborhood Blending Vertex Shader + */ +@vertex +fn neighborhood_blending_vertex_main(@builtin(vertex_index) vertex_index: u32) + -> NeighborhoodBlendingVaryings { + let varyings = calculate_vertex_varyings(vertex_index); + let offset = smaa_info.rt_metrics.xyxy * vec4(1.0, 0.0, 0.0, 1.0) + varyings.tex_coord.xyxy; + return NeighborhoodBlendingVaryings( + vec4(varyings.clip_coord, 0.0, 1.0), + offset, + varyings.tex_coord + ); +} + +#endif // SMAA_NEIGHBORHOOD_BLENDING + +//----------------------------------------------------------------------------- +// Edge Detection Pixel Shaders (First Pass) + +#ifdef SMAA_EDGE_DETECTION + +/** + * Luma Edge Detection + * + * IMPORTANT NOTICE: luma edge detection requires gamma-corrected colors, and + * thus 'color_texture' should be a non-sRGB texture. + */ +@fragment +fn luma_edge_detection_fragment_main(in: EdgeDetectionVaryings) -> @location(0) vec4 { + // Calculate the threshold: + // TODO: Predication. 
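+    // (With SMAA predication enabled this threshold would be scaled per pixel
+    // using a separate predication buffer such as depth; this port currently
+    // uses the fixed preset threshold only.)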
+ let threshold = vec2(SMAA_THRESHOLD); + + // Calculate luma: + let weights = vec3(0.2126, 0.7152, 0.0722); + let L = dot(textureSample(color_texture, color_sampler, in.tex_coord).rgb, weights); + + let Lleft = dot(textureSample(color_texture, color_sampler, in.offset_0.xy).rgb, weights); + let Ltop = dot(textureSample(color_texture, color_sampler, in.offset_0.zw).rgb, weights); + + // We do the usual threshold: + var delta: vec4 = vec4(abs(L - vec2(Lleft, Ltop)), 0.0, 0.0); + var edges = step(threshold, delta.xy); + + // Then discard if there is no edge: + if (dot(edges, vec2(1.0)) == 0.0) { + discard; + } + + // Calculate right and bottom deltas: + let Lright = dot(textureSample(color_texture, color_sampler, in.offset_1.xy).rgb, weights); + let Lbottom = dot(textureSample(color_texture, color_sampler, in.offset_1.zw).rgb, weights); + delta = vec4(delta.xy, abs(L - vec2(Lright, Lbottom))); + + // Calculate the maximum delta in the direct neighborhood: + var max_delta = max(delta.xy, delta.zw); + + // Calculate left-left and top-top deltas: + let Lleftleft = dot(textureSample(color_texture, color_sampler, in.offset_2.xy).rgb, weights); + let Ltoptop = dot(textureSample(color_texture, color_sampler, in.offset_2.zw).rgb, weights); + delta = vec4(delta.xy, abs(vec2(Lleft, Ltop) - vec2(Lleftleft, Ltoptop))); + + // Calculate the final maximum delta: + max_delta = max(max_delta.xy, delta.zw); + let final_delta = max(max_delta.x, max_delta.y); + + // Local contrast adaptation: + edges *= step(vec2(final_delta), SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy); + + return vec4(edges, 0.0, 1.0); +} + +#endif // SMAA_EDGE_DETECTION + +#ifdef SMAA_BLENDING_WEIGHT_CALCULATION + +//----------------------------------------------------------------------------- +// Diagonal Search Functions + +#ifndef SMAA_DISABLE_DIAG_DETECTION + +/** + * Allows to decode two binary values from a bilinear-filtered access. + */ +fn decode_diag_bilinear_access_2(in_e: vec2) -> vec2 { + // Bilinear access for fetching 'e' have a 0.25 offset, and we are + // interested in the R and G edges: + // + // +---G---+-------+ + // | x o R x | + // +-------+-------+ + // + // Then, if one of these edge is enabled: + // Red: (0.75 * X + 0.25 * 1) => 0.25 or 1.0 + // Green: (0.75 * 1 + 0.25 * X) => 0.75 or 1.0 + // + // This function will unpack the values (mad + mul + round): + // wolframalpha.com: round(x * abs(5 * x - 5 * 0.75)) plot 0 to 1 + var e = in_e; + e.r = e.r * abs(5.0 * e.r - 5.0 * 0.75); + return round(e); +} + +fn decode_diag_bilinear_access_4(e: vec4) -> vec4 { + let e_rb = e.rb * abs(5.0 * e.rb - 5.0 * 0.75); + return round(vec4(e_rb.x, e.g, e_rb.y, e.a)); +} + +/** + * These functions allows to perform diagonal pattern searches. 
+ */ +fn search_diag_1(tex_coord: vec2, dir: vec2, e: ptr>) -> vec2 { + var coord = vec4(tex_coord, -1.0, 1.0); + let t = vec3(smaa_info.rt_metrics.xy, 1.0); + while (coord.z < f32(SMAA_MAX_SEARCH_STEPS_DIAG - 1u) && coord.w > 0.9) { + coord = vec4(t * vec3(dir, 1.0) + coord.xyz, coord.w); + *e = textureSampleLevel(edges_texture, edges_sampler, coord.xy, 0.0).rg; + coord.w = dot(*e, vec2(0.5)); + } + return coord.zw; +} + +fn search_diag_2(tex_coord: vec2, dir: vec2, e: ptr>) -> vec2 { + var coord = vec4(tex_coord, -1.0, 1.0); + coord.x += 0.25 * smaa_info.rt_metrics.x; // See @SearchDiag2Optimization + let t = vec3(smaa_info.rt_metrics.xy, 1.0); + while (coord.z < f32(SMAA_MAX_SEARCH_STEPS_DIAG - 1u) && coord.w > 0.9) { + coord = vec4(t * vec3(dir, 1.0) + coord.xyz, coord.w); + + // @SearchDiag2Optimization + // Fetch both edges at once using bilinear filtering: + *e = textureSampleLevel(edges_texture, edges_sampler, coord.xy, 0.0).rg; + *e = decode_diag_bilinear_access_2(*e); + + // Non-optimized version: + // e.g = SMAASampleLevelZero(edgesTex, coord.xy).g; + // e.r = SMAASampleLevelZeroOffset(edgesTex, coord.xy, int2(1, 0)).r; + + coord.w = dot(*e, vec2(0.5)); + } + return coord.zw; +} + +/** + * Similar to SMAAArea, this calculates the area corresponding to a certain + * diagonal distance and crossing edges 'e'. + */ +fn area_diag(dist: vec2, e: vec2, offset: f32) -> vec2 { + var tex_coord = vec2(SMAA_AREATEX_MAX_DISTANCE_DIAG) * e + dist; + + // We do a scale and bias for mapping to texel space: + tex_coord = SMAA_AREATEX_PIXEL_SIZE * tex_coord + 0.5 * SMAA_AREATEX_PIXEL_SIZE; + + // Diagonal areas are on the second half of the texture: + tex_coord.x += 0.5; + + // Move to proper place, according to the subpixel offset: + tex_coord.y += SMAA_AREATEX_SUBTEX_SIZE * offset; + + // Do it! + return textureSampleLevel(area_texture, edges_sampler, tex_coord, 0.0).rg; +} + +/** + * This searches for diagonal patterns and returns the corresponding weights. 
+ */ +fn calculate_diag_weights(tex_coord: vec2, e: vec2, subsample_indices: vec4) + -> vec2 { + var weights = vec2(0.0, 0.0); + + // Search for the line ends: + var d = vec4(0.0); + var end = vec2(0.0); + if (e.r > 0.0) { + let d_xz = search_diag_1(tex_coord, vec2(-1.0, 1.0), &end); + d = vec4(d_xz.x, d.y, d_xz.y, d.w); + d.x += f32(end.y > 0.9); + } else { + d = vec4(0.0, d.y, 0.0, d.w); + } + let d_yw = search_diag_1(tex_coord, vec2(1.0, -1.0), &end); + d = vec4(d.x, d_yw.x, d.y, d_yw.y); + + if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3 + // Fetch the crossing edges: + let coords = vec4(-d.x + 0.25, d.x, d.y, -d.y - 0.25) * smaa_info.rt_metrics.xyxy + + tex_coord.xyxy; + var c = vec4( + textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0, vec2(-1, 0)).rg, + textureSampleLevel(edges_texture, edges_sampler, coords.zw, 0.0, vec2( 1, 0)).rg, + ); + let c_yxwz = decode_diag_bilinear_access_4(c.xyzw); + c = c_yxwz.yxwz; + + // Non-optimized version: + // float4 coords = mad(float4(-d.x, d.x, d.y, -d.y), SMAA_RT_METRICS.xyxy, texcoord.xyxy); + // float4 c; + // c.x = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2(-1, 0)).g; + // c.y = SMAASampleLevelZeroOffset(edgesTex, coords.xy, int2( 0, 0)).r; + // c.z = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, 0)).g; + // c.w = SMAASampleLevelZeroOffset(edgesTex, coords.zw, int2( 1, -1)).r; + + // Merge crossing edges at each side into a single value: + var cc = vec2(2.0) * c.xz + c.yw; + + // Remove the crossing edge if we didn't found the end of the line: + cc = select(cc, vec2(0.0, 0.0), vec2(step(vec2(0.9), d.zw))); + + // Fetch the areas for this line: + weights += area_diag(d.xy, cc, subsample_indices.z); + } + + // Search for the line ends: + let d_xz = search_diag_2(tex_coord, vec2(-1.0, -1.0), &end); + if (textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0, vec2(1, 0)).r > 0.0) { + let d_yw = search_diag_2(tex_coord, vec2(1.0, 1.0), &end); + d = vec4(d.x, d_yw.x, d.z, d_yw.y); + d.y += f32(end.y > 0.9); + } else { + d = vec4(d.x, 0.0, d.z, 0.0); + } + + if (d.x + d.y > 2.0) { // d.x + d.y + 1 > 3 + // Fetch the crossing edges: + let coords = vec4(-d.x, -d.x, d.y, d.y) * smaa_info.rt_metrics.xyxy + tex_coord.xyxy; + let c = vec4( + textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0, vec2(-1, 0)).g, + textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0, vec2( 0, -1)).r, + textureSampleLevel(edges_texture, edges_sampler, coords.zw, 0.0, vec2( 1, 0)).gr, + ); + var cc = vec2(2.0) * c.xz + c.yw; + + // Remove the crossing edge if we didn't found the end of the line: + cc = select(cc, vec2(0.0, 0.0), vec2(step(vec2(0.9), d.zw))); + + // Fetch the areas for this line: + weights += area_diag(d.xy, cc, subsample_indices.w).gr; + } + + return weights; +} + +#endif // SMAA_DISABLE_DIAG_DETECTION + +//----------------------------------------------------------------------------- +// Horizontal/Vertical Search Functions + +/** + * This allows to determine how much length should we add in the last step + * of the searches. It takes the bilinearly interpolated edge (see + * @PSEUDO_GATHER4), and adds 0, 1 or 2, depending on which edges and + * crossing edges are active. 
+ */ +fn search_length(e: vec2, offset: f32) -> f32 { + // The texture is flipped vertically, with left and right cases taking half + // of the space horizontally: + var scale = SMAA_SEARCHTEX_SIZE * vec2(0.5, -1.0); + var bias = SMAA_SEARCHTEX_SIZE * vec2(offset, 1.0); + + // Scale and bias to access texel centers: + scale += vec2(-1.0, 1.0); + bias += vec2( 0.5, -0.5); + + // Convert from pixel coordinates to texcoords: + // (We use SMAA_SEARCHTEX_PACKED_SIZE because the texture is cropped) + scale *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE; + bias *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE; + + // Lookup the search texture: + return textureSampleLevel(search_texture, edges_sampler, scale * e + bias, 0.0).r; +} + +/** + * Horizontal/vertical search functions for the 2nd pass. + */ +fn search_x_left(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + /** + * @PSEUDO_GATHER4 + * This texcoord has been offset by (-0.25, -0.125) in the vertex shader to + * sample between edge, thus fetching four edges in a row. + * Sampling with different offsets in each direction allows to disambiguate + * which edges are active from the four fetched ones. + */ + var e = vec2(0.0, 1.0); + while (tex_coord.x > end && + e.g > 0.8281 && // Is there some edge not activated? + e.r == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += -vec2(2.0, 0.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e, 0.0) + 3.25; + return smaa_info.rt_metrics.x * offset + tex_coord.x; +} + +fn search_x_right(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + var e = vec2(0.0, 1.0); + while (tex_coord.x < end && + e.g > 0.8281 && // Is there some edge not activated? + e.r == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += vec2(2.0, 0.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e, 0.5) + 3.25; + return -smaa_info.rt_metrics.x * offset + tex_coord.x; +} + +fn search_y_up(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + var e = vec2(1.0, 0.0); + while (tex_coord.y > end && + e.r > 0.8281 && // Is there some edge not activated? + e.g == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += -vec2(0.0, 2.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e.gr, 0.0) + 3.25; + return smaa_info.rt_metrics.y * offset + tex_coord.y; +} + +fn search_y_down(in_tex_coord: vec2, end: f32) -> f32 { + var tex_coord = in_tex_coord; + + var e = vec2(1.0, 0.0); + while (tex_coord.y < end && + e.r > 0.8281 && // Is there some edge not activated? + e.g == 0.0) { // Or is there a crossing edge that breaks the line? + e = textureSampleLevel(edges_texture, edges_sampler, tex_coord, 0.0).rg; + tex_coord += vec2(0.0, 2.0) * smaa_info.rt_metrics.xy; + } + let offset = -(255.0 / 127.0) * search_length(e.gr, 0.5) + 3.25; + return -smaa_info.rt_metrics.y * offset + tex_coord.y; +} + +/** + * Ok, we have the distance and both crossing edges. So, what are the areas + * at each side of current edge? 
+ */ +fn area(dist: vec2, e1: f32, e2: f32, offset: f32) -> vec2 { + // Rounding prevents precision errors of bilinear filtering: + var tex_coord = SMAA_AREATEX_MAX_DISTANCE * round(4.0 * vec2(e1, e2)) + dist; + + // We do a scale and bias for mapping to texel space: + tex_coord = SMAA_AREATEX_PIXEL_SIZE * tex_coord + 0.5 * SMAA_AREATEX_PIXEL_SIZE; + + // Move to proper place, according to the subpixel offset: + tex_coord.y += SMAA_AREATEX_SUBTEX_SIZE * offset; + + // Do it! + return textureSample(area_texture, edges_sampler, tex_coord).rg; +} + +//----------------------------------------------------------------------------- +// Corner Detection Functions + +fn detect_horizontal_corner_pattern(weights: vec2, tex_coord: vec4, d: vec2) + -> vec2 { +#ifndef SMAA_DISABLE_CORNER_DETECTION + let left_right = step(d.xy, d.yx); + var rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * left_right; + + rounding /= left_right.x + left_right.y; // Reduce blending for pixels in the center of a line. + + var factor = vec2(1.0, 1.0); + factor.x -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2(0, 1)).r; + factor.x -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2(1, 1)).r; + factor.y -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2(0, -2)).r; + factor.y -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2(1, -2)).r; + + return weights * saturate(factor); +#else // SMAA_DISABLE_CORNER_DETECTION + return weights; +#endif // SMAA_DISABLE_CORNER_DETECTION +} + +fn detect_vertical_corner_pattern(weights: vec2, tex_coord: vec4, d: vec2) + -> vec2 { +#ifndef SMAA_DISABLE_CORNER_DETECTION + let left_right = step(d.xy, d.yx); + var rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * left_right; + + rounding /= left_right.x + left_right.y; + + var factor = vec2(1.0, 1.0); + factor.x -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2( 1, 0)).g; + factor.x -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2( 1, 1)).g; + factor.y -= rounding.x * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.xy, 0.0, vec2(-2, 0)).g; + factor.y -= rounding.y * + textureSampleLevel(edges_texture, edges_sampler, tex_coord.zw, 0.0, vec2(-2, 1)).g; + + return weights * saturate(factor); +#else // SMAA_DISABLE_CORNER_DETECTION + return weights; +#endif // SMAA_DISABLE_CORNER_DETECTION +} + +//----------------------------------------------------------------------------- +// Blending Weight Calculation Pixel Shader (Second Pass) + +@fragment +fn blending_weight_calculation_fragment_main(in: BlendingWeightCalculationVaryings) + -> @location(0) vec4 { + let subsample_indices = vec4(0.0); // Just pass zero for SMAA 1x, see @SUBSAMPLE_INDICES. + + var weights = vec4(0.0); + + var e = textureSample(edges_texture, edges_sampler, in.tex_coord).rg; + + if (e.g > 0.0) { // Edge at north +#ifndef SMAA_DISABLE_DIAG_DETECTION + // Diagonals have both north and west edges, so searching for them in + // one of the boundaries is enough. + weights = vec4(calculate_diag_weights(in.tex_coord, e, subsample_indices), weights.ba); + + // We give priority to diagonals, so if we find a diagonal we skip + // horizontal/vertical processing. 
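+        // (`calculate_diag_weights` returns the accumulated diagonal areas in
+        // weights.rg, so a non-zero sum means a diagonal pattern was found.)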
+ if (weights.r + weights.g != 0.0) { + return weights; + } +#endif // SMAA_DISABLE_DIAG_DETECTION + + var d: vec2; + + // Find the distance to the left: + var coords: vec3; + coords.x = search_x_left(in.offset_0.xy, in.offset_2.x); + // in.offset_1.y = in.tex_coord.y - 0.25 * smaa_info.rt_metrics.y (@CROSSING_OFFSET) + coords.y = in.offset_1.y; + d.x = coords.x; + + // Now fetch the left crossing edges, two at a time using bilinear + // filtering. Sampling at -0.25 (see @CROSSING_OFFSET) enables to + // discern what value each edge has: + let e1 = textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0).r; + + // Find the distance to the right: + coords.z = search_x_right(in.offset_0.zw, in.offset_2.y); + d.y = coords.z; + + // We want the distances to be in pixel units (doing this here allow to + // better interleave arithmetic and memory accesses): + d = abs(round(smaa_info.rt_metrics.zz * d - in.position.xx)); + + // SMAAArea below needs a sqrt, as the areas texture is compressed + // quadratically: + let sqrt_d = sqrt(d); + + // Fetch the right crossing edges: + let e2 = textureSampleLevel( + edges_texture, edges_sampler, coords.zy, 0.0, vec2(1, 0)).r; + + // Ok, we know how this pattern looks like, now it is time for getting + // the actual area: + weights = vec4(area(sqrt_d, e1, e2, subsample_indices.y), weights.ba); + + // Fix corners: + coords.y = in.tex_coord.y; + weights = vec4( + detect_horizontal_corner_pattern(weights.rg, coords.xyzy, d), + weights.ba + ); + } + + if (e.r > 0.0) { // Edge at west + var d: vec2; + + // Find the distance to the top: + var coords: vec3; + coords.y = search_y_up(in.offset_1.xy, in.offset_2.z); + // in.offset_1.x = in.tex_coord.x - 0.25 * smaa_info.rt_metrics.x + coords.x = in.offset_0.x; + d.x = coords.y; + + // Fetch the top crossing edges: + let e1 = textureSampleLevel(edges_texture, edges_sampler, coords.xy, 0.0).g; + + // Find the distance to the bottom: + coords.z = search_y_down(in.offset_1.zw, in.offset_2.w); + d.y = coords.z; + + // We want the distances to be in pixel units: + d = abs(round(smaa_info.rt_metrics.ww * d - in.position.yy)); + + // SMAAArea below needs a sqrt, as the areas texture is compressed + // quadratically: + let sqrt_d = sqrt(d); + + // Fetch the bottom crossing edges: + let e2 = textureSampleLevel( + edges_texture, edges_sampler, coords.xz, 0.0, vec2(0, 1)).g; + + // Get the area for this direction: + weights = vec4(weights.rg, area(sqrt_d, e1, e2, subsample_indices.x)); + + // Fix corners: + coords.x = in.tex_coord.x; + weights = vec4(weights.rg, detect_vertical_corner_pattern(weights.ba, coords.xyxz, d)); + } + + return weights; +} + +#endif // SMAA_BLENDING_WEIGHT_CALCULATION + +#ifdef SMAA_NEIGHBORHOOD_BLENDING + +//----------------------------------------------------------------------------- +// Neighborhood Blending Pixel Shader (Third Pass) + +@fragment +fn neighborhood_blending_fragment_main(in: NeighborhoodBlendingVaryings) -> @location(0) vec4 { + // Fetch the blending weights for current pixel: + let a = vec4( + textureSample(blend_texture, blend_sampler, in.offset.xy).a, // Right + textureSample(blend_texture, blend_sampler, in.offset.zw).g, // Top + textureSample(blend_texture, blend_sampler, in.tex_coord).zx, // Bottom / Left + ); + + // Is there any blending weight with a value greater than 0.0? 
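+    // (dot(a, vec4(1.0)) is simply the sum of the four weights; it is
+    // compared against a small epsilon rather than an exact 0.0.)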
+ if (dot(a, vec4(1.0)) < 1.0e-5) { + let color = textureSampleLevel(color_texture, blend_sampler, in.tex_coord, 0.0); + // TODO: Reprojection + return color; + } else { + let h = max(a.x, a.z) > max(a.y, a.w); // max(horizontal) > max(vertical) + + // Calculate the blending offsets: + var blending_offset = vec4(0.0, a.y, 0.0, a.w); + var blending_weight = a.yw; + blending_offset = select(blending_offset, vec4(a.x, 0.0, a.z, 0.0), h); + blending_weight = select(blending_weight, a.xz, h); + blending_weight /= dot(blending_weight, vec2(1.0)); + + // Calculate the texture coordinates: + let blending_coord = + blending_offset * vec4(smaa_info.rt_metrics.xy, -smaa_info.rt_metrics.xy) + + in.tex_coord.xyxy; + + // We exploit bilinear filtering to mix current pixel with the chosen + // neighbor: + var color = blending_weight.x * + textureSampleLevel(color_texture, blend_sampler, blending_coord.xy, 0.0); + color += blending_weight.y * + textureSampleLevel(color_texture, blend_sampler, blending_coord.zw, 0.0); + + // TODO: Reprojection + + return color; + } +} + +#endif // SMAA_NEIGHBORHOOD_BLENDING + +``` + +### bevy_shaders/sprite + +```rust +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif + +#import bevy_render::{ + maths::affine3_to_square, + view::View, +} + +#import bevy_sprite::sprite_view_bindings::view + +struct VertexInput { + @builtin(vertex_index) index: u32, + // NOTE: Instance-rate vertex buffer members prefixed with i_ + // NOTE: i_model_transpose_colN are the 3 columns of a 3x4 matrix that is the transpose of the + // affine 4x3 model matrix. + @location(0) i_model_transpose_col0: vec4, + @location(1) i_model_transpose_col1: vec4, + @location(2) i_model_transpose_col2: vec4, + @location(3) i_color: vec4, + @location(4) i_uv_offset_scale: vec4, +} + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) uv: vec2, + @location(1) @interpolate(flat) color: vec4, +}; + +@vertex +fn vertex(in: VertexInput) -> VertexOutput { + var out: VertexOutput; + + let vertex_position = vec3( + f32(in.index & 0x1u), + f32((in.index & 0x2u) >> 1u), + 0.0 + ); + + out.clip_position = view.clip_from_world * affine3_to_square(mat3x4( + in.i_model_transpose_col0, + in.i_model_transpose_col1, + in.i_model_transpose_col2, + )) * vec4(vertex_position, 1.0); + out.uv = vec2(vertex_position.xy) * in.i_uv_offset_scale.zw + in.i_uv_offset_scale.xy; + out.color = in.i_color; + + return out; +} + +@group(1) @binding(0) var sprite_texture: texture_2d; +@group(1) @binding(1) var sprite_sampler: sampler; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + var color = in.color * textureSample(sprite_texture, sprite_sampler, in.uv); + +#ifdef TONEMAP_IN_SHADER + color = tonemapping::tone_mapping(color, view.color_grading); +#endif + + return color; +} + +``` + +### bevy_shaders/taa + +```rust +// References: +// https://www.elopezr.com/temporal-aa-and-the-quest-for-the-holy-trail +// http://behindthepixels.io/assets/files/TemporalAA.pdf +// http://leiy.cc/publications/TAA/TAA_EG2020_Talk.pdf +// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING + +// Controls how much to blend between the current and past samples +// Lower numbers = less of the current sample and more of the past sample = more smoothing +// Values chosen empirically +const DEFAULT_HISTORY_BLEND_RATE: f32 = 0.1; // Default blend rate to use when no confidence in history +const MIN_HISTORY_BLEND_RATE: f32 = 0.015; // Minimum blend rate allowed, 
to ensure at least some of the current sample is used + +@group(0) @binding(0) var view_target: texture_2d; +@group(0) @binding(1) var history: texture_2d; +@group(0) @binding(2) var motion_vectors: texture_2d; +@group(0) @binding(3) var depth: texture_depth_2d; +@group(0) @binding(4) var nearest_sampler: sampler; +@group(0) @binding(5) var linear_sampler: sampler; + +struct Output { + @location(0) view_target: vec4, + @location(1) history: vec4, +}; + +// TAA is ideally applied after tonemapping, but before post processing +// Post processing wants to go before tonemapping, which conflicts +// Solution: Put TAA before tonemapping, tonemap TAA input, apply TAA, invert-tonemap TAA output +// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 20 +// https://gpuopen.com/learn/optimized-reversible-tonemapper-for-resolve +fn rcp(x: f32) -> f32 { return 1.0 / x; } +fn max3(x: vec3) -> f32 { return max(x.r, max(x.g, x.b)); } +fn tonemap(color: vec3) -> vec3 { return color * rcp(max3(color) + 1.0); } +fn reverse_tonemap(color: vec3) -> vec3 { return color * rcp(1.0 - max3(color)); } + +// The following 3 functions are from Playdead (MIT-licensed) +// https://github.com/playdeadgames/temporal/blob/master/Assets/Shaders/TemporalReprojection.shader +fn RGB_to_YCoCg(rgb: vec3) -> vec3 { + let y = (rgb.r / 4.0) + (rgb.g / 2.0) + (rgb.b / 4.0); + let co = (rgb.r / 2.0) - (rgb.b / 2.0); + let cg = (-rgb.r / 4.0) + (rgb.g / 2.0) - (rgb.b / 4.0); + return vec3(y, co, cg); +} + +fn YCoCg_to_RGB(ycocg: vec3) -> vec3 { + let r = ycocg.x + ycocg.y - ycocg.z; + let g = ycocg.x + ycocg.z; + let b = ycocg.x - ycocg.y - ycocg.z; + return saturate(vec3(r, g, b)); +} + +fn clip_towards_aabb_center(history_color: vec3, current_color: vec3, aabb_min: vec3, aabb_max: vec3) -> vec3 { + let p_clip = 0.5 * (aabb_max + aabb_min); + let e_clip = 0.5 * (aabb_max - aabb_min) + 0.00000001; + let v_clip = history_color - p_clip; + let v_unit = v_clip / e_clip; + let a_unit = abs(v_unit); + let ma_unit = max3(a_unit); + if ma_unit > 1.0 { + return p_clip + (v_clip / ma_unit); + } else { + return history_color; + } +} + +fn sample_history(u: f32, v: f32) -> vec3 { + return textureSample(history, linear_sampler, vec2(u, v)).rgb; +} + +fn sample_view_target(uv: vec2) -> vec3 { + var sample = textureSample(view_target, nearest_sampler, uv).rgb; +#ifdef TONEMAP + sample = tonemap(sample); +#endif + return RGB_to_YCoCg(sample); +} + +@fragment +fn taa(@location(0) uv: vec2) -> Output { + let texture_size = vec2(textureDimensions(view_target)); + let texel_size = 1.0 / texture_size; + + // Fetch the current sample + let original_color = textureSample(view_target, nearest_sampler, uv); + var current_color = original_color.rgb; +#ifdef TONEMAP + current_color = tonemap(current_color); +#endif + +#ifndef RESET + // Pick the closest motion_vector from 5 samples (reduces aliasing on the edges of moving entities) + // https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 27 + let offset = texel_size * 2.0; + let d_uv_tl = uv + vec2(-offset.x, offset.y); + let d_uv_tr = uv + vec2(offset.x, offset.y); + let d_uv_bl = uv + vec2(-offset.x, -offset.y); + let d_uv_br = uv + vec2(offset.x, -offset.y); + var closest_uv = uv; + let d_tl = textureSample(depth, nearest_sampler, d_uv_tl); + let d_tr = textureSample(depth, nearest_sampler, d_uv_tr); + var closest_depth = textureSample(depth, nearest_sampler, uv); + let d_bl = textureSample(depth, 
nearest_sampler, d_uv_bl); + let d_br = textureSample(depth, nearest_sampler, d_uv_br); + if d_tl > closest_depth { + closest_uv = d_uv_tl; + closest_depth = d_tl; + } + if d_tr > closest_depth { + closest_uv = d_uv_tr; + closest_depth = d_tr; + } + if d_bl > closest_depth { + closest_uv = d_uv_bl; + closest_depth = d_bl; + } + if d_br > closest_depth { + closest_uv = d_uv_br; + } + let closest_motion_vector = textureSample(motion_vectors, nearest_sampler, closest_uv).rg; + + // Reproject to find the equivalent sample from the past + // Uses 5-sample Catmull-Rom filtering (reduces blurriness) + // Catmull-Rom filtering: https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1 + // Ignoring corners: https://www.activision.com/cdn/research/Dynamic_Temporal_Antialiasing_and_Upsampling_in_Call_of_Duty_v4.pdf#page=68 + // Technically we should renormalize the weights since we're skipping the corners, but it's basically the same result + let history_uv = uv - closest_motion_vector; + let sample_position = history_uv * texture_size; + let texel_center = floor(sample_position - 0.5) + 0.5; + let f = sample_position - texel_center; + let w0 = f * (-0.5 + f * (1.0 - 0.5 * f)); + let w1 = 1.0 + f * f * (-2.5 + 1.5 * f); + let w2 = f * (0.5 + f * (2.0 - 1.5 * f)); + let w3 = f * f * (-0.5 + 0.5 * f); + let w12 = w1 + w2; + let texel_position_0 = (texel_center - 1.0) * texel_size; + let texel_position_3 = (texel_center + 2.0) * texel_size; + let texel_position_12 = (texel_center + (w2 / w12)) * texel_size; + var history_color = sample_history(texel_position_12.x, texel_position_0.y) * w12.x * w0.y; + history_color += sample_history(texel_position_0.x, texel_position_12.y) * w0.x * w12.y; + history_color += sample_history(texel_position_12.x, texel_position_12.y) * w12.x * w12.y; + history_color += sample_history(texel_position_3.x, texel_position_12.y) * w3.x * w12.y; + history_color += sample_history(texel_position_12.x, texel_position_3.y) * w12.x * w3.y; + + // Constrain past sample with 3x3 YCoCg variance clipping (reduces ghosting) + // YCoCg: https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 33 + // Variance clipping: https://developer.download.nvidia.com/gameworks/events/GDC2016/msalvi_temporal_supersampling.pdf + let s_tl = sample_view_target(uv + vec2(-texel_size.x, texel_size.y)); + let s_tm = sample_view_target(uv + vec2( 0.0, texel_size.y)); + let s_tr = sample_view_target(uv + vec2( texel_size.x, texel_size.y)); + let s_ml = sample_view_target(uv + vec2(-texel_size.x, 0.0)); + let s_mm = RGB_to_YCoCg(current_color); + let s_mr = sample_view_target(uv + vec2( texel_size.x, 0.0)); + let s_bl = sample_view_target(uv + vec2(-texel_size.x, -texel_size.y)); + let s_bm = sample_view_target(uv + vec2( 0.0, -texel_size.y)); + let s_br = sample_view_target(uv + vec2( texel_size.x, -texel_size.y)); + let moment_1 = s_tl + s_tm + s_tr + s_ml + s_mm + s_mr + s_bl + s_bm + s_br; + let moment_2 = (s_tl * s_tl) + (s_tm * s_tm) + (s_tr * s_tr) + (s_ml * s_ml) + (s_mm * s_mm) + (s_mr * s_mr) + (s_bl * s_bl) + (s_bm * s_bm) + (s_br * s_br); + let mean = moment_1 / 9.0; + let variance = (moment_2 / 9.0) - (mean * mean); + let std_deviation = sqrt(max(variance, vec3(0.0))); + history_color = RGB_to_YCoCg(history_color); + history_color = clip_towards_aabb_center(history_color, s_mm, mean - std_deviation, mean + std_deviation); + history_color = YCoCg_to_RGB(history_color); + + // How confident we are that the history is representative of the current 
frame + var history_confidence = textureSample(history, nearest_sampler, uv).a; + let pixel_motion_vector = abs(closest_motion_vector) * texture_size; + if pixel_motion_vector.x < 0.01 && pixel_motion_vector.y < 0.01 { + // Increment when pixels are not moving + history_confidence += 10.0; + } else { + // Else reset + history_confidence = 1.0; + } + + // Blend current and past sample + // Use more of the history if we're confident in it (reduces noise when there is no motion) + // https://hhoppe.com/supersample.pdf, section 4.1 + var current_color_factor = clamp(1.0 / history_confidence, MIN_HISTORY_BLEND_RATE, DEFAULT_HISTORY_BLEND_RATE); + + // Reject history when motion vectors point off screen + if any(saturate(history_uv) != history_uv) { + current_color_factor = 1.0; + history_confidence = 1.0; + } + + current_color = mix(history_color, current_color, current_color_factor); +#endif // #ifndef RESET + + + // Write output to history and view target + var out: Output; +#ifdef RESET + let history_confidence = 1.0 / MIN_HISTORY_BLEND_RATE; +#endif + out.history = vec4(current_color, history_confidence); +#ifdef TONEMAP + current_color = reverse_tonemap(current_color); +#endif + out.view_target = vec4(current_color, original_color.a); + return out; +} + +``` + +### bevy_shaders/pbr_functions + +```rust +#define_import_path bevy_pbr::pbr_functions + +#import bevy_pbr::{ + pbr_types, + pbr_bindings, + mesh_view_bindings as view_bindings, + mesh_view_types, + lighting, + lighting::{LAYER_BASE, LAYER_CLEARCOAT}, + transmission, + clustered_forward as clustering, + shadows, + ambient, + irradiance_volume, + mesh_types::{MESH_FLAGS_SHADOW_RECEIVER_BIT, MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT}, +} +#import bevy_render::maths::{E, powsafe} + +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::VertexOutput +#else ifdef PREPASS_PIPELINE +#import bevy_pbr::prepass_io::VertexOutput +#else // PREPASS_PIPELINE +#import bevy_pbr::forward_io::VertexOutput +#endif // PREPASS_PIPELINE + +#ifdef ENVIRONMENT_MAP +#import bevy_pbr::environment_map +#endif + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping::{tone_mapping, screen_space_dither} +#endif + + +// Biasing info needed to sample from a texture when calling `sample_texture`. +// How this is done depends on whether we're rendering meshlets or regular +// meshes. +struct SampleBias { +#ifdef MESHLET_MESH_MATERIAL_PASS + ddx_uv: vec2, + ddy_uv: vec2, +#else // MESHLET_MESH_MATERIAL_PASS + mip_bias: f32, +#endif // MESHLET_MESH_MATERIAL_PASS +} + +// This is the standard 4x4 ordered dithering pattern from [1]. +// +// We can't use `array, 4>` because they can't be indexed dynamically +// due to Naga limitations. So instead we pack into a single `vec4` and extract +// individual bytes. +// +// [1]: https://en.wikipedia.org/wiki/Ordered_dithering#Threshold_map +const DITHER_THRESHOLD_MAP: vec4 = vec4( + 0x0a020800, + 0x060e040c, + 0x09010b03, + 0x050d070f +); + +// Processes a visibility range dither value and discards the fragment if +// needed. +// +// Visibility ranges, also known as HLODs, are crossfades between different +// levels of detail. +// +// The `dither` value ranges from [-16, 16]. When zooming out, positive values +// are used for meshes that are in the process of disappearing, while negative +// values are used for meshes that are in the process of appearing. 
In other +// words, when the camera is moving backwards, the `dither` value counts up from +// -16 to 0 when the object is fading in, stays at 0 while the object is +// visible, and then counts up to 16 while the object is fading out. +// Distinguishing between negative and positive values allows the dither +// patterns for different LOD levels of a single mesh to mesh together properly. +#ifdef VISIBILITY_RANGE_DITHER +fn visibility_range_dither(frag_coord: vec4, dither: i32) { + // If `dither` is 0, the object is visible. + if (dither == 0) { + return; + } + + // If `dither` is less than -15 or greater than 15, the object is culled. + if (dither <= -16 || dither >= 16) { + discard; + } + + // Otherwise, check the dither pattern. + let coords = vec2(floor(frag_coord.xy)) % 4u; + let threshold = i32((DITHER_THRESHOLD_MAP[coords.y] >> (coords.x * 8)) & 0xff); + if ((dither >= 0 && dither + threshold >= 16) || (dither < 0 && 1 + dither + threshold <= 0)) { + discard; + } +} +#endif + +fn alpha_discard(material: pbr_types::StandardMaterial, output_color: vec4) -> vec4 { + var color = output_color; + let alpha_mode = material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE { + // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0 + color.a = 1.0; + } + +#ifdef MAY_DISCARD + // NOTE: `MAY_DISCARD` is only defined in the alpha to coverage case if MSAA + // was off. This special situation causes alpha to coverage to fall back to + // alpha mask. + else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK || + alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE { + if color.a >= material.alpha_cutoff { + // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque + color.a = 1.0; + } else { + // NOTE: output_color.a < in.material.alpha_cutoff should not be rendered + discard; + } + } +#endif + + return color; +} + +// Samples a texture using the appropriate biasing metric for the type of mesh +// in use (mesh vs. meshlet). +fn sample_texture( + texture: texture_2d, + samp: sampler, + uv: vec2, + bias: SampleBias, +) -> vec4 { +#ifdef MESHLET_MESH_MATERIAL_PASS + return textureSampleGrad(texture, samp, uv, bias.ddx_uv, bias.ddy_uv); +#else + return textureSampleBias(texture, samp, uv, bias.mip_bias); +#endif +} + +fn prepare_world_normal( + world_normal: vec3, + double_sided: bool, + is_front: bool, +) -> vec3 { + var output: vec3 = world_normal; +#ifndef VERTEX_TANGENTS +#ifndef STANDARD_MATERIAL_NORMAL_MAP + // NOTE: When NOT using normal-mapping, if looking at the back face of a double-sided + // material, the normal needs to be inverted. This is a branchless version of that. + output = (f32(!double_sided || is_front) * 2.0 - 1.0) * output; +#endif +#endif + return output; +} + +// Calculates the three TBN vectors according to [mikktspace]. Returns a matrix +// with T, B, N columns in that order. +// +// [mikktspace]: http://www.mikktspace.com/ +fn calculate_tbn_mikktspace(world_normal: vec3, world_tangent: vec4) -> mat3x3 { + // NOTE: The mikktspace method of normal mapping explicitly requires that the world normal NOT + // be re-normalized in the fragment shader. This is primarily to match the way mikktspace + // bakes vertex tangents and normal maps so that this is the exact inverse. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. 
Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ + var N: vec3 = world_normal; + + // NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be + // normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the + // vertex tangent! Do not change this code unless you really know what you are doing. + // http://www.mikktspace.com/ + var T: vec3 = world_tangent.xyz; + var B: vec3 = world_tangent.w * cross(N, T); + + return mat3x3(T, B, N); +} + +fn apply_normal_mapping( + standard_material_flags: u32, + TBN: mat3x3, + double_sided: bool, + is_front: bool, + in_Nt: vec3, +) -> vec3 { + // Unpack the TBN vectors. + var T = TBN[0]; + var B = TBN[1]; + var N = TBN[2]; + + // Nt is the tangent-space normal. + var Nt = in_Nt; + if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u { + // Only use the xy components and derive z for 2-component normal maps. + Nt = vec3(Nt.rg * 2.0 - 1.0, 0.0); + Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y); + } else { + Nt = Nt * 2.0 - 1.0; + } + // Normal maps authored for DirectX require flipping the y component + if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u { + Nt.y = -Nt.y; + } + + if double_sided && !is_front { + Nt = -Nt; + } + + // NOTE: The mikktspace method of normal mapping applies maps the tangent-space normal from + // the normal map texture in this way to be an EXACT inverse of how the normal map baker + // calculates the normal maps so there is no error introduced. Do not change this code + // unless you really know what you are doing. + // http://www.mikktspace.com/ + N = Nt.x * T + Nt.y * B + Nt.z * N; + + return normalize(N); +} + +#ifdef STANDARD_MATERIAL_ANISOTROPY + +// Modifies the normal to achieve a better approximate direction from the +// environment map when using anisotropy. +// +// This follows the suggested implementation in the `KHR_materials_anisotropy` specification: +// https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_anisotropy/README.md#image-based-lighting +fn bend_normal_for_anisotropy(lighting_input: ptr) { + // Unpack. + let N = (*lighting_input).layers[LAYER_BASE].N; + let roughness = (*lighting_input).layers[LAYER_BASE].roughness; + let V = (*lighting_input).V; + let anisotropy = (*lighting_input).anisotropy; + let Ba = (*lighting_input).Ba; + + var bent_normal = normalize(cross(cross(Ba, V), Ba)); + + // The `KHR_materials_anisotropy` spec states: + // + // > This heuristic can probably be improved upon + let a = pow(2.0, pow(2.0, 1.0 - anisotropy * (1.0 - roughness))); + bent_normal = normalize(mix(bent_normal, N, a)); + + // The `KHR_materials_anisotropy` spec states: + // + // > Mixing the reflection with the normal is more accurate both with and + // > without anisotropy and keeps rough objects from gathering light from + // > behind their tangent plane. + let R = normalize(mix(reflect(-V, bent_normal), bent_normal, roughness * roughness)); + + (*lighting_input).layers[LAYER_BASE].N = bent_normal; + (*lighting_input).layers[LAYER_BASE].R = R; +} + +#endif // STANDARD_MATERIAL_ANISTROPY + +// NOTE: Correctly calculates the view vector depending on whether +// the projection is orthographic or perspective. 
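+//
+// Illustrative note (not part of the upstream shader): with an orthographic
+// projection every fragment shares the same view direction, so the branch
+// below extracts a constant direction from the third row of `clip_from_world`
+// (read as the .z component of each column) instead of deriving a per-fragment
+// vector. With a perspective projection the view vector is simply the
+// normalized offset from the fragment's world position to the camera position.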
+fn calculate_view( + world_position: vec4, + is_orthographic: bool, +) -> vec3 { + var V: vec3; + if is_orthographic { + // Orthographic view vector + V = normalize(vec3(view_bindings::view.clip_from_world[0].z, view_bindings::view.clip_from_world[1].z, view_bindings::view.clip_from_world[2].z)); + } else { + // Only valid for a perspective projection + V = normalize(view_bindings::view.world_position.xyz - world_position.xyz); + } + return V; +} + +// Diffuse strength is inversely related to metallicity, specular and diffuse transmission +fn calculate_diffuse_color( + base_color: vec3, + metallic: f32, + specular_transmission: f32, + diffuse_transmission: f32 +) -> vec3 { + return base_color * (1.0 - metallic) * (1.0 - specular_transmission) * + (1.0 - diffuse_transmission); +} + +// Remapping [0,1] reflectance to F0 +// See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping +fn calculate_F0(base_color: vec3, metallic: f32, reflectance: f32) -> vec3 { + return 0.16 * reflectance * reflectance * (1.0 - metallic) + base_color * metallic; +} + +#ifndef PREPASS_FRAGMENT +fn apply_pbr_lighting( + in: pbr_types::PbrInput, +) -> vec4 { + var output_color: vec4 = in.material.base_color; + + let emissive = in.material.emissive; + + // calculate non-linear roughness from linear perceptualRoughness + let metallic = in.material.metallic; + let perceptual_roughness = in.material.perceptual_roughness; + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); + let ior = in.material.ior; + let thickness = in.material.thickness; + let reflectance = in.material.reflectance; + let diffuse_transmission = in.material.diffuse_transmission; + let specular_transmission = in.material.specular_transmission; + + let specular_transmissive_color = specular_transmission * in.material.base_color.rgb; + + let diffuse_occlusion = in.diffuse_occlusion; + let specular_occlusion = in.specular_occlusion; + + // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" + let NdotV = max(dot(in.N, in.V), 0.0001); + let R = reflect(-in.V, in.N); + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Do the above calculations again for the clearcoat layer. Remember that + // the clearcoat can have its own roughness and its own normal. 
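+    // Illustrative note (not part of the upstream shader): the clearcoat is
+    // modelled as a second, purely specular lobe layered over the base
+    // material. It shares the view vector V but carries its own normal and
+    // roughness, so it needs its own NdotV and reflection vector R below. Its
+    // F0 is the fixed dielectric value 0.04, which is what `calculate_F0`
+    // above returns for reflectance = 0.5 and metallic = 0.0
+    // (0.16 * 0.5 * 0.5 = 0.04).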
+ let clearcoat = in.material.clearcoat; + let clearcoat_perceptual_roughness = in.material.clearcoat_perceptual_roughness; + let clearcoat_roughness = lighting::perceptualRoughnessToRoughness(clearcoat_perceptual_roughness); + let clearcoat_N = in.clearcoat_N; + let clearcoat_NdotV = max(dot(clearcoat_N, in.V), 0.0001); + let clearcoat_R = reflect(-in.V, clearcoat_N); +#endif // STANDARD_MATERIAL_CLEARCOAT + + let diffuse_color = calculate_diffuse_color( + output_color.rgb, + metallic, + specular_transmission, + diffuse_transmission + ); + + // Diffuse transmissive strength is inversely related to metallicity and specular transmission, but directly related to diffuse transmission + let diffuse_transmissive_color = output_color.rgb * (1.0 - metallic) * (1.0 - specular_transmission) * diffuse_transmission; + + // Calculate the world position of the second Lambertian lobe used for diffuse transmission, by subtracting material thickness + let diffuse_transmissive_lobe_world_position = in.world_position - vec4(in.world_normal, 0.0) * thickness; + + let F0 = calculate_F0(output_color.rgb, metallic, reflectance); + let F_ab = lighting::F_AB(perceptual_roughness, NdotV); + + var direct_light: vec3 = vec3(0.0); + + // Transmitted Light (Specular and Diffuse) + var transmitted_light: vec3 = vec3(0.0); + + // Pack all the values into a structure. + var lighting_input: lighting::LightingInput; + lighting_input.layers[LAYER_BASE].NdotV = NdotV; + lighting_input.layers[LAYER_BASE].N = in.N; + lighting_input.layers[LAYER_BASE].R = R; + lighting_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness; + lighting_input.layers[LAYER_BASE].roughness = roughness; + lighting_input.P = in.world_position.xyz; + lighting_input.V = in.V; + lighting_input.diffuse_color = diffuse_color; + lighting_input.F0_ = F0; + lighting_input.F_ab = F_ab; +#ifdef STANDARD_MATERIAL_CLEARCOAT + lighting_input.layers[LAYER_CLEARCOAT].NdotV = clearcoat_NdotV; + lighting_input.layers[LAYER_CLEARCOAT].N = clearcoat_N; + lighting_input.layers[LAYER_CLEARCOAT].R = clearcoat_R; + lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = clearcoat_perceptual_roughness; + lighting_input.layers[LAYER_CLEARCOAT].roughness = clearcoat_roughness; + lighting_input.clearcoat_strength = clearcoat; +#endif // STANDARD_MATERIAL_CLEARCOAT +#ifdef STANDARD_MATERIAL_ANISOTROPY + lighting_input.anisotropy = in.anisotropy_strength; + lighting_input.Ta = in.anisotropy_T; + lighting_input.Ba = in.anisotropy_B; +#endif // STANDARD_MATERIAL_ANISOTROPY + + // And do the same for transmissive if we need to. 
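+    // Illustrative note (not part of the upstream shader): diffuse
+    // transmission is approximated with a second, fully rough Lambertian lobe
+    // placed one material `thickness` behind the surface (see
+    // `diffuse_transmissive_lobe_world_position` above). Its lighting input
+    // therefore flips N and V, forces roughness to 1.0 and zeroes F0 so that
+    // only the diffuse term survives.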
+#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + var transmissive_lighting_input: lighting::LightingInput; + transmissive_lighting_input.layers[LAYER_BASE].NdotV = 1.0; + transmissive_lighting_input.layers[LAYER_BASE].N = -in.N; + transmissive_lighting_input.layers[LAYER_BASE].R = vec3(0.0); + transmissive_lighting_input.layers[LAYER_BASE].perceptual_roughness = 1.0; + transmissive_lighting_input.layers[LAYER_BASE].roughness = 1.0; + transmissive_lighting_input.P = diffuse_transmissive_lobe_world_position.xyz; + transmissive_lighting_input.V = -in.V; + transmissive_lighting_input.diffuse_color = diffuse_transmissive_color; + transmissive_lighting_input.F0_ = vec3(0.0); + transmissive_lighting_input.F_ab = vec2(0.1); +#ifdef STANDARD_MATERIAL_CLEARCOAT + transmissive_lighting_input.layers[LAYER_CLEARCOAT].NdotV = 0.0; + transmissive_lighting_input.layers[LAYER_CLEARCOAT].N = vec3(0.0); + transmissive_lighting_input.layers[LAYER_CLEARCOAT].R = vec3(0.0); + transmissive_lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = 0.0; + transmissive_lighting_input.layers[LAYER_CLEARCOAT].roughness = 0.0; + transmissive_lighting_input.clearcoat_strength = 0.0; +#endif // STANDARD_MATERIAL_CLEARCOAT +#ifdef STANDARD_MATERIAL_ANISOTROPY + lighting_input.anisotropy = in.anisotropy_strength; + lighting_input.Ta = in.anisotropy_T; + lighting_input.Ba = in.anisotropy_B; +#endif // STANDARD_MATERIAL_ANISOTROPY +#endif // STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + + let view_z = dot(vec4( + view_bindings::view.view_from_world[0].z, + view_bindings::view.view_from_world[1].z, + view_bindings::view.view_from_world[2].z, + view_bindings::view.view_from_world[3].z + ), in.world_position); + let cluster_index = clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic); + let offset_and_counts = clustering::unpack_offset_and_counts(cluster_index); + + // Point lights (direct) + for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) { + let light_id = clustering::get_clusterable_object_id(i); + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_point_shadow(light_id, in.world_position, in.world_normal); + } + + let light_contrib = lighting::point_light(light_id, &lighting_input); + direct_light += light_contrib * shadow; + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // roughness = 1.0; + // NdotV = 1.0; + // R = vec3(0.0) // doesn't really matter + // F_ab = vec2(0.1) + // F0 = vec3(0.0) + var transmitted_shadow: f32 = 1.0; + if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + transmitted_shadow = shadows::fetch_point_shadow(light_id, diffuse_transmissive_lobe_world_position, -in.world_normal); + } + + let transmitted_light_contrib = + lighting::point_light(light_id, &transmissive_lighting_input); + transmitted_light += transmitted_light_contrib * 
transmitted_shadow; +#endif + } + + // Spot lights (direct) + for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) { + let light_id = clustering::get_clusterable_object_id(i); + + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_spot_shadow(light_id, in.world_position, in.world_normal); + } + + let light_contrib = lighting::spot_light(light_id, &lighting_input); + direct_light += light_contrib * shadow; + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // roughness = 1.0; + // NdotV = 1.0; + // R = vec3(0.0) // doesn't really matter + // F_ab = vec2(0.1) + // F0 = vec3(0.0) + var transmitted_shadow: f32 = 1.0; + if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) + && (view_bindings::clusterable_objects.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + transmitted_shadow = shadows::fetch_spot_shadow(light_id, diffuse_transmissive_lobe_world_position, -in.world_normal); + } + + let transmitted_light_contrib = + lighting::spot_light(light_id, &transmissive_lighting_input); + transmitted_light += transmitted_light_contrib * transmitted_shadow; +#endif + } + + // directional lights (direct) + let n_directional_lights = view_bindings::lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + // check if this light should be skipped, which occurs if this light does not intersect with the view + // note point and spot lights aren't skippable, as the relevant lights are filtered in `assign_lights_to_clusters` + let light = &view_bindings::lights.directional_lights[i]; + if (*light).skip != 0u { + continue; + } + + var shadow: f32 = 1.0; + if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u + && (view_bindings::lights.directional_lights[i].flags & mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + shadow = shadows::fetch_directional_shadow(i, in.world_position, in.world_normal, view_z); + } + + var light_contrib = lighting::directional_light(i, &lighting_input); + +#ifdef DIRECTIONAL_LIGHT_SHADOW_MAP_DEBUG_CASCADES + light_contrib = shadows::cascade_debug_visualization(light_contrib, i, view_z); +#endif + direct_light += light_contrib * shadow; + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // roughness = 1.0; + // NdotV = 1.0; + // R = vec3(0.0) // doesn't really matter + // F_ab = vec2(0.1) + // F0 = vec3(0.0) + var transmitted_shadow: f32 = 1.0; + if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) + && (view_bindings::lights.directional_lights[i].flags & 
mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { + transmitted_shadow = shadows::fetch_directional_shadow(i, diffuse_transmissive_lobe_world_position, -in.world_normal, view_z); + } + + let transmitted_light_contrib = + lighting::directional_light(i, &transmissive_lighting_input); + transmitted_light += transmitted_light_contrib * transmitted_shadow; +#endif + } + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated + // world position, inverted normal and view vectors, and the following simplified + // values for a fully diffuse transmitted light contribution approximation: + // + // perceptual_roughness = 1.0; + // NdotV = 1.0; + // F0 = vec3(0.0) + // diffuse_occlusion = vec3(1.0) + transmitted_light += ambient::ambient_light(diffuse_transmissive_lobe_world_position, -in.N, -in.V, 1.0, diffuse_transmissive_color, vec3(0.0), 1.0, vec3(1.0)); +#endif + + // Diffuse indirect lighting can come from a variety of sources. The + // priority goes like this: + // + // 1. Lightmap (highest) + // 2. Irradiance volume + // 3. Environment map (lowest) + // + // When we find a source of diffuse indirect lighting, we stop accumulating + // any more diffuse indirect light. This avoids double-counting if, for + // example, both lightmaps and irradiance volumes are present. + + var indirect_light = vec3(0.0f); + +#ifdef LIGHTMAP + if (all(indirect_light == vec3(0.0f))) { + indirect_light += in.lightmap_light * diffuse_color; + } +#endif + +#ifdef IRRADIANCE_VOLUME { + // Irradiance volume light (indirect) + if (all(indirect_light == vec3(0.0f))) { + let irradiance_volume_light = irradiance_volume::irradiance_volume_light( + in.world_position.xyz, in.N); + indirect_light += irradiance_volume_light * diffuse_color * diffuse_occlusion; + } +#endif + + // Environment map light (indirect) +#ifdef ENVIRONMENT_MAP + +#ifdef STANDARD_MATERIAL_ANISOTROPY + var bent_normal_lighting_input = lighting_input; + bend_normal_for_anisotropy(&bent_normal_lighting_input); + let environment_map_lighting_input = &bent_normal_lighting_input; +#else // STANDARD_MATERIAL_ANISOTROPY + let environment_map_lighting_input = &lighting_input; +#endif // STANDARD_MATERIAL_ANISOTROPY + + let environment_light = environment_map::environment_map_light( + environment_map_lighting_input, + any(indirect_light != vec3(0.0f)) + ); + + // If screen space reflections are going to be used for this material, don't + // accumulate environment map light yet. The SSR shader will do it. 
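+    // Illustrative note (not part of the upstream shader): the decision is
+    // purely roughness based. A material takes the SSR path only when its
+    // perceptual roughness is at or below
+    // `ssr_settings.perceptual_roughness_threshold`; rougher materials fall
+    // through to the regular environment-map accumulation below.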
+#ifdef SCREEN_SPACE_REFLECTIONS + let use_ssr = perceptual_roughness <= + view_bindings::ssr_settings.perceptual_roughness_threshold; +#else // SCREEN_SPACE_REFLECTIONS + let use_ssr = false; +#endif // SCREEN_SPACE_REFLECTIONS + + if (!use_ssr) { + let environment_light = environment_map::environment_map_light( + &lighting_input, + any(indirect_light != vec3(0.0f)) + ); + + indirect_light += environment_light.diffuse * diffuse_occlusion + + environment_light.specular * specular_occlusion; + } + +#endif // ENVIRONMENT_MAP + + // Ambient light (indirect) + indirect_light += ambient::ambient_light(in.world_position, in.N, in.V, NdotV, diffuse_color, F0, perceptual_roughness, diffuse_occlusion); + + // we'll use the specular component of the transmitted environment + // light in the call to `specular_transmissive_light()` below + var specular_transmitted_environment_light = vec3(0.0); + +#ifdef ENVIRONMENT_MAP + +#ifdef STANDARD_MATERIAL_DIFFUSE_OR_SPECULAR_TRANSMISSION + // NOTE: We use the diffuse transmissive color, inverted normal and view vectors, + // and the following simplified values for the transmitted environment light contribution + // approximation: + // + // diffuse_color = vec3(1.0) // later we use `diffuse_transmissive_color` and `specular_transmissive_color` + // NdotV = 1.0; + // R = T // see definition below + // F0 = vec3(1.0) + // diffuse_occlusion = 1.0 + // + // (This one is slightly different from the other light types above, because the environment + // map light returns both diffuse and specular components separately, and we want to use both) + + let T = -normalize( + in.V + // start with view vector at entry point + refract(in.V, -in.N, 1.0 / ior) * thickness // add refracted vector scaled by thickness, towards exit point + ); // normalize to find exit point view vector + + var transmissive_environment_light_input: lighting::LightingInput; + transmissive_environment_light_input.diffuse_color = vec3(1.0); + transmissive_environment_light_input.layers[LAYER_BASE].NdotV = 1.0; + transmissive_environment_light_input.P = in.world_position.xyz; + transmissive_environment_light_input.layers[LAYER_BASE].N = -in.N; + transmissive_environment_light_input.V = in.V; + transmissive_environment_light_input.layers[LAYER_BASE].R = T; + transmissive_environment_light_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness; + transmissive_environment_light_input.layers[LAYER_BASE].roughness = roughness; + transmissive_environment_light_input.F0_ = vec3(1.0); + transmissive_environment_light_input.F_ab = vec2(0.1); +#ifdef STANDARD_MATERIAL_CLEARCOAT + // No clearcoat. 
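+    // Illustrative note (not part of the upstream shader): the clearcoat layer
+    // is disabled for this transmitted-environment lookup by zeroing its
+    // strength and roughness, so only the base layer contributes to the
+    // refracted sample.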
+ transmissive_environment_light_input.clearcoat_strength = 0.0; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].NdotV = 0.0; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].N = in.N; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].R = vec3(0.0); + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].perceptual_roughness = 0.0; + transmissive_environment_light_input.layers[LAYER_CLEARCOAT].roughness = 0.0; +#endif // STANDARD_MATERIAL_CLEARCOAT + + let transmitted_environment_light = + environment_map::environment_map_light(&transmissive_environment_light_input, false); + +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION + transmitted_light += transmitted_environment_light.diffuse * diffuse_transmissive_color; +#endif // STANDARD_MATERIAL_DIFFUSE_TRANSMISSION +#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION + specular_transmitted_environment_light = transmitted_environment_light.specular * specular_transmissive_color; +#endif // STANDARD_MATERIAL_SPECULAR_TRANSMISSION + +#endif // STANDARD_MATERIAL_SPECULAR_OR_DIFFUSE_TRANSMISSION + +#endif // ENVIRONMENT_MAP + + var emissive_light = emissive.rgb * output_color.a; + + // "The clearcoat layer is on top of emission in the layering stack. + // Consequently, the emission is darkened by the Fresnel term." + // + // +#ifdef STANDARD_MATERIAL_CLEARCOAT + emissive_light = emissive_light * (0.04 + (1.0 - 0.04) * pow(1.0 - clearcoat_NdotV, 5.0)); +#endif + + emissive_light = emissive_light * mix(1.0, view_bindings::view.exposure, emissive.a); + +#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION + transmitted_light += transmission::specular_transmissive_light(in.world_position, in.frag_coord.xyz, view_z, in.N, in.V, F0, ior, thickness, perceptual_roughness, specular_transmissive_color, specular_transmitted_environment_light).rgb; + + if (in.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ATTENUATION_ENABLED_BIT) != 0u { + // We reuse the `atmospheric_fog()` function here, as it's fundamentally + // equivalent to the attenuation that takes place inside the material volume, + // and will allow us to eventually hook up subsurface scattering more easily + var attenuation_fog: mesh_view_types::Fog; + attenuation_fog.base_color.a = 1.0; + attenuation_fog.be = pow(1.0 - in.material.attenuation_color.rgb, vec3(E)) / in.material.attenuation_distance; + // TODO: Add the subsurface scattering factor below + // attenuation_fog.bi = /* ... */ + transmitted_light = bevy_pbr::fog::atmospheric_fog( + attenuation_fog, vec4(transmitted_light, 1.0), thickness, + vec3(0.0) // TODO: Pass in (pre-attenuated) scattered light contribution here + ).rgb; + } +#endif + + // Total light + output_color = vec4( + (view_bindings::view.exposure * (transmitted_light + direct_light + indirect_light)) + emissive_light, + output_color.a + ); + + output_color = clustering::cluster_debug_visualization( + output_color, + view_z, + in.is_orthographic, + offset_and_counts, + cluster_index, + ); + + return output_color; +} +#endif // PREPASS_FRAGMENT + +fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_world_position: vec3, view_world_position: vec3) -> vec4 { + let view_to_world = fragment_world_position.xyz - view_world_position.xyz; + + // `length()` is used here instead of just `view_to_world.z` since that produces more + // high quality results, especially for denser/smaller fogs. 
we get a "curved" + // fog shape that remains consistent with camera rotation, instead of a "linear" + // fog shape that looks a bit fake + let distance = length(view_to_world); + + var scattering = vec3(0.0); + if fog_params.directional_light_color.a > 0.0 { + let view_to_world_normalized = view_to_world / distance; + let n_directional_lights = view_bindings::lights.n_directional_lights; + for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { + let light = view_bindings::lights.directional_lights[i]; + scattering += pow( + max( + dot(view_to_world_normalized, light.direction_to_light), + 0.0 + ), + fog_params.directional_light_exponent + ) * light.color.rgb * view_bindings::view.exposure; + } + } + + if fog_params.mode == mesh_view_types::FOG_MODE_LINEAR { + return bevy_pbr::fog::linear_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL { + return bevy_pbr::fog::exponential_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL_SQUARED { + return bevy_pbr::fog::exponential_squared_fog(fog_params, input_color, distance, scattering); + } else if fog_params.mode == mesh_view_types::FOG_MODE_ATMOSPHERIC { + return bevy_pbr::fog::atmospheric_fog(fog_params, input_color, distance, scattering); + } else { + return input_color; + } +} + +#ifdef PREMULTIPLY_ALPHA +fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4 { +// `Blend`, `Premultiplied` and `Alpha` all share the same `BlendState`. Depending +// on the alpha mode, we premultiply the color channels by the alpha channel value, +// (and also optionally replace the alpha value with 0.0) so that the result produces +// the desired blend mode when sent to the blending operation. +#ifdef BLEND_PREMULTIPLIED_ALPHA + // For `BlendState::PREMULTIPLIED_ALPHA_BLENDING` the blend function is: + // + // result = 1 * src_color + (1 - src_alpha) * dst_color + let alpha_mode = standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD { + // Here, we premultiply `src_color` by `src_alpha`, and replace `src_alpha` with 0.0: + // + // src_color *= src_alpha + // src_alpha = 0.0 + // + // We end up with: + // + // result = 1 * (src_alpha * src_color) + (1 - 0) * dst_color + // result = src_alpha * src_color + 1 * dst_color + // + // Which is the blend operation for additive blending + return vec4(color.rgb * color.a, 0.0); + } else { + // Here, we don't do anything, so that we get premultiplied alpha blending. 
(As expected) + return color.rgba; + } +#endif +// `Multiply` uses its own `BlendState`, but we still need to premultiply here in the +// shader so that we get correct results as we tweak the alpha channel +#ifdef BLEND_MULTIPLY + // The blend function is: + // + // result = dst_color * src_color + (1 - src_alpha) * dst_color + // + // We premultiply `src_color` by `src_alpha`: + // + // src_color *= src_alpha + // + // We end up with: + // + // result = dst_color * (src_color * src_alpha) + (1 - src_alpha) * dst_color + // result = src_alpha * (src_color * dst_color) + (1 - src_alpha) * dst_color + // + // Which is the blend operation for multiplicative blending with arbitrary mixing + // controlled by the source alpha channel + return vec4(color.rgb * color.a, color.a); +#endif +} +#endif + +// fog, alpha premultiply +// for non-hdr cameras, tonemapping and debanding +fn main_pass_post_lighting_processing( + pbr_input: pbr_types::PbrInput, + input_color: vec4, +) -> vec4 { + var output_color = input_color; + + // fog + if (view_bindings::fog.mode != mesh_view_types::FOG_MODE_OFF && (pbr_input.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { + output_color = apply_fog(view_bindings::fog, output_color, pbr_input.world_position.xyz, view_bindings::view.world_position.xyz); + } + +#ifdef TONEMAP_IN_SHADER + output_color = tone_mapping(output_color, view_bindings::view.color_grading); +#ifdef DEBAND_DITHER + var output_rgb = output_color.rgb; + output_rgb = powsafe(output_rgb, 1.0 / 2.2); + output_rgb += screen_space_dither(pbr_input.frag_coord.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. + output_rgb = powsafe(output_rgb, 2.2); + output_color = vec4(output_rgb, output_color.a); +#endif +#endif +#ifdef PREMULTIPLY_ALPHA + output_color = premultiply_alpha(pbr_input.material.flags, output_color); +#endif + return output_color; +} + +``` + +### bevy_shaders/environment_map + +```rust +#define_import_path bevy_pbr::environment_map + +#import bevy_pbr::light_probe::query_light_probe +#import bevy_pbr::mesh_view_bindings as bindings +#import bevy_pbr::mesh_view_bindings::light_probes +#import bevy_pbr::lighting::{ + F_Schlick_vec, LayerLightingInput, LightingInput, LAYER_BASE, LAYER_CLEARCOAT +} + +struct EnvironmentMapLight { + diffuse: vec3, + specular: vec3, +}; + +struct EnvironmentMapRadiances { + irradiance: vec3, + radiance: vec3, +} + +// Define two versions of this function, one for the case in which there are +// multiple light probes and one for the case in which only the view light probe +// is present. + +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY + +fn compute_radiances( + input: ptr, + layer: u32, + world_position: vec3, + found_diffuse_indirect: bool, +) -> EnvironmentMapRadiances { + // Unpack. + let perceptual_roughness = (*input).layers[layer].perceptual_roughness; + let N = (*input).layers[layer].N; + let R = (*input).layers[layer].R; + + var radiances: EnvironmentMapRadiances; + + // Search for a reflection probe that contains the fragment. + var query_result = query_light_probe(world_position, /*is_irradiance_volume=*/ false); + + // If we didn't find a reflection probe, use the view environment map if applicable. + if (query_result.texture_index < 0) { + query_result.texture_index = light_probes.view_cubemap_index; + query_result.intensity = light_probes.intensity_for_view; + } + + // If there's no cubemap, bail out. 
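+    // Illustrative note (not part of the upstream shader): the fallback chain
+    // is reflection probe, then the view-level environment map, then nothing.
+    // `query_light_probe` returns a negative texture index when no probe
+    // volume contains the fragment, in which case the view's cubemap index is
+    // substituted above; if that index is also negative there is no
+    // environment lighting at all and both radiances come back black.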
+ if (query_result.texture_index < 0) { + radiances.irradiance = vec3(0.0); + radiances.radiance = vec3(0.0); + return radiances; + } + + // Split-sum approximation for image based lighting: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf + let radiance_level = perceptual_roughness * f32(textureNumLevels( + bindings::specular_environment_maps[query_result.texture_index]) - 1u); + + if (!found_diffuse_indirect) { + radiances.irradiance = textureSampleLevel( + bindings::diffuse_environment_maps[query_result.texture_index], + bindings::environment_map_sampler, + vec3(N.xy, -N.z), + 0.0).rgb * query_result.intensity; + } + + radiances.radiance = textureSampleLevel( + bindings::specular_environment_maps[query_result.texture_index], + bindings::environment_map_sampler, + vec3(R.xy, -R.z), + radiance_level).rgb * query_result.intensity; + + return radiances; +} + +#else // MULTIPLE_LIGHT_PROBES_IN_ARRAY + +fn compute_radiances( + input: ptr, + layer: u32, + world_position: vec3, + found_diffuse_indirect: bool, +) -> EnvironmentMapRadiances { + // Unpack. + let perceptual_roughness = (*input).layers[layer].perceptual_roughness; + let N = (*input).layers[layer].N; + let R = (*input).layers[layer].R; + + var radiances: EnvironmentMapRadiances; + + if (light_probes.view_cubemap_index < 0) { + radiances.irradiance = vec3(0.0); + radiances.radiance = vec3(0.0); + return radiances; + } + + // Split-sum approximation for image based lighting: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf + // Technically we could use textureNumLevels(specular_environment_map) - 1 here, but we use a uniform + // because textureNumLevels() does not work on WebGL2 + let radiance_level = perceptual_roughness * f32(light_probes.smallest_specular_mip_level_for_view); + + let intensity = light_probes.intensity_for_view; + + if (!found_diffuse_indirect) { + radiances.irradiance = textureSampleLevel( + bindings::diffuse_environment_map, + bindings::environment_map_sampler, + vec3(N.xy, -N.z), + 0.0).rgb * intensity; + } + + radiances.radiance = textureSampleLevel( + bindings::specular_environment_map, + bindings::environment_map_sampler, + vec3(R.xy, -R.z), + radiance_level).rgb * intensity; + + return radiances; +} + +#endif // MULTIPLE_LIGHT_PROBES_IN_ARRAY + +#ifdef STANDARD_MATERIAL_CLEARCOAT + +// Adds the environment map light from the clearcoat layer to that of the base +// layer. +fn environment_map_light_clearcoat( + out: ptr, + input: ptr, + found_diffuse_indirect: bool, +) { + // Unpack. + let world_position = (*input).P; + let clearcoat_NdotV = (*input).layers[LAYER_CLEARCOAT].NdotV; + let clearcoat_strength = (*input).clearcoat_strength; + + // Calculate the Fresnel term `Fc` for the clearcoat layer. + // 0.04 is a hardcoded value for F0 from the Filament spec. + let clearcoat_F0 = vec3(0.04); + let Fc = F_Schlick_vec(clearcoat_F0, 1.0, clearcoat_NdotV) * clearcoat_strength; + let inv_Fc = 1.0 - Fc; + + let clearcoat_radiances = compute_radiances( + input, LAYER_CLEARCOAT, world_position, found_diffuse_indirect); + + // Composite the clearcoat layer on top of the existing one. + // These formulas are from Filament: + // + (*out).diffuse *= inv_Fc; + (*out).specular = (*out).specular * inv_Fc * inv_Fc + clearcoat_radiances.radiance * Fc; +} + +#endif // STANDARD_MATERIAL_CLEARCOAT + +fn environment_map_light( + input: ptr, + found_diffuse_indirect: bool, +) -> EnvironmentMapLight { + // Unpack. 
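+    //
+    // Illustrative note (not part of the upstream shader): this function
+    // combines the prefiltered irradiance/radiance from `compute_radiances`
+    // with the multiple-scattering energy-compensation terms derived below
+    // (FssEss, FmsEms, kD), and returns the diffuse and specular parts
+    // separately so the caller can weight them by diffuse and specular
+    // occlusion respectively.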
+ let roughness = (*input).layers[LAYER_BASE].roughness; + let diffuse_color = (*input).diffuse_color; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let F_ab = (*input).F_ab; + let F0 = (*input).F0_; + let world_position = (*input).P; + + var out: EnvironmentMapLight; + + let radiances = compute_radiances(input, LAYER_BASE, world_position, found_diffuse_indirect); + if (all(radiances.irradiance == vec3(0.0)) && all(radiances.radiance == vec3(0.0))) { + out.diffuse = vec3(0.0); + out.specular = vec3(0.0); + return out; + } + + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. + // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(F0, vec3(50.0 * 0.33))); + + // Multiscattering approximation: https://www.jcgt.org/published/0008/01/03/paper.pdf + // Useful reference: https://bruop.github.io/ibl + let Fr = max(vec3(1.0 - roughness), F0) - F0; + let kS = F0 + Fr * pow(1.0 - NdotV, 5.0); + let Ess = F_ab.x + F_ab.y; + let FssEss = kS * Ess * specular_occlusion; + let Ems = 1.0 - Ess; + let Favg = F0 + (1.0 - F0) / 21.0; + let Fms = FssEss * Favg / (1.0 - Ems * Favg); + let FmsEms = Fms * Ems; + let Edss = 1.0 - (FssEss + FmsEms); + let kD = diffuse_color * Edss; + + if (!found_diffuse_indirect) { + out.diffuse = (FmsEms + kD) * radiances.irradiance; + } else { + out.diffuse = vec3(0.0); + } + + out.specular = FssEss * radiances.radiance; + +#ifdef STANDARD_MATERIAL_CLEARCOAT + environment_map_light_clearcoat(&out, input, found_diffuse_indirect); +#endif // STANDARD_MATERIAL_CLEARCOAT + + return out; +} + +``` + +### bevy_shaders/morph + +```rust +#define_import_path bevy_pbr::morph + +#ifdef MORPH_TARGETS + +#import bevy_pbr::mesh_types::MorphWeights; + +@group(1) @binding(2) var morph_weights: MorphWeights; +@group(1) @binding(3) var morph_targets: texture_3d; +@group(1) @binding(7) var prev_morph_weights: MorphWeights; + +// NOTE: Those are the "hardcoded" values found in `MorphAttributes` struct +// in crates/bevy_render/src/mesh/morph/visitors.rs +// In an ideal world, the offsets are established dynamically and passed as #defines +// to the shader, but it's out of scope for the initial implementation of morph targets. +const position_offset: u32 = 0u; +const normal_offset: u32 = 3u; +const tangent_offset: u32 = 6u; +const total_component_count: u32 = 9u; + +fn layer_count() -> u32 { + let dimensions = textureDimensions(morph_targets); + return u32(dimensions.z); +} +fn component_texture_coord(vertex_index: u32, component_offset: u32) -> vec2 { + let width = u32(textureDimensions(morph_targets).x); + let component_index = total_component_count * vertex_index + component_offset; + return vec2(component_index % width, component_index / width); +} +fn weight_at(weight_index: u32) -> f32 { + let i = weight_index; + return morph_weights.weights[i / 4u][i % 4u]; +} +fn prev_weight_at(weight_index: u32) -> f32 { + let i = weight_index; + return prev_morph_weights.weights[i / 4u][i % 4u]; +} +fn morph_pixel(vertex: u32, component: u32, weight: u32) -> f32 { + let coord = component_texture_coord(vertex, component); + // Due to https://gpuweb.github.io/gpuweb/wgsl/#texel-formats + // While the texture stores a f32, the textureLoad returns a vec4<>, where + // only the first component is set. 
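+    // Illustrative note (not part of the upstream shader): each vertex
+    // occupies `total_component_count` (9) consecutive texels in a
+    // row-wrapped layout, namely 3 position, 3 normal and 3 tangent
+    // components per the offsets declared at the top of this file, and the
+    // morph-target index selects the z layer of the 3D texture. For example,
+    // vertex 10's normal.y component lives at
+    // component_index = 9 * 10 + normal_offset + 1 = 94, i.e. texel
+    // (94 % width, 94 / width) of layer `weight`.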
+ return textureLoad(morph_targets, vec3(coord, weight), 0).r; +} +fn morph(vertex_index: u32, component_offset: u32, weight_index: u32) -> vec3 { + return vec3( + morph_pixel(vertex_index, component_offset, weight_index), + morph_pixel(vertex_index, component_offset + 1u, weight_index), + morph_pixel(vertex_index, component_offset + 2u, weight_index), + ); +} + +#endif // MORPH_TARGETS + +``` + +### bevy_shaders/gtao + +```rust +// Ground Truth-based Ambient Occlusion (GTAO) +// Paper: https://www.activision.com/cdn/research/Practical_Real_Time_Strategies_for_Accurate_Indirect_Occlusion_NEW%20VERSION_COLOR.pdf +// Presentation: https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf + +// Source code heavily based on XeGTAO v1.30 from Intel +// https://github.com/GameTechDev/XeGTAO/blob/0d177ce06bfa642f64d8af4de1197ad1bcb862d4/Source/Rendering/Shaders/XeGTAO.hlsli + +#import bevy_pbr::gtao_utils::fast_acos + +#import bevy_render::{ + view::View, + globals::Globals, + maths::{PI, HALF_PI}, +} + +@group(0) @binding(0) var preprocessed_depth: texture_2d; +@group(0) @binding(1) var normals: texture_2d; +@group(0) @binding(2) var hilbert_index_lut: texture_2d; +@group(0) @binding(3) var ambient_occlusion: texture_storage_2d; +@group(0) @binding(4) var depth_differences: texture_storage_2d; +@group(0) @binding(5) var globals: Globals; +@group(1) @binding(0) var point_clamp_sampler: sampler; +@group(1) @binding(1) var view: View; + +fn load_noise(pixel_coordinates: vec2) -> vec2 { + var index = textureLoad(hilbert_index_lut, pixel_coordinates % 64, 0).r; + +#ifdef TEMPORAL_JITTER + index += 288u * (globals.frame_count % 64u); +#endif + + // R2 sequence - http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences + return fract(0.5 + f32(index) * vec2(0.75487766624669276005, 0.5698402909980532659114)); +} + +// Calculate differences in depth between neighbor pixels (later used by the spatial denoiser pass to preserve object edges) +fn calculate_neighboring_depth_differences(pixel_coordinates: vec2) -> f32 { + // Sample the pixel's depth and 4 depths around it + let uv = vec2(pixel_coordinates) / view.viewport.zw; + let depths_upper_left = textureGather(0, preprocessed_depth, point_clamp_sampler, uv); + let depths_bottom_right = textureGather(0, preprocessed_depth, point_clamp_sampler, uv, vec2(1i, 1i)); + let depth_center = depths_upper_left.y; + let depth_left = depths_upper_left.x; + let depth_top = depths_upper_left.z; + let depth_bottom = depths_bottom_right.x; + let depth_right = depths_bottom_right.z; + + // Calculate the depth differences (large differences represent object edges) + var edge_info = vec4(depth_left, depth_right, depth_top, depth_bottom) - depth_center; + let slope_left_right = (edge_info.y - edge_info.x) * 0.5; + let slope_top_bottom = (edge_info.w - edge_info.z) * 0.5; + let edge_info_slope_adjusted = edge_info + vec4(slope_left_right, -slope_left_right, slope_top_bottom, -slope_top_bottom); + edge_info = min(abs(edge_info), abs(edge_info_slope_adjusted)); + let bias = 0.25; // Using the bias and then saturating nudges the values a bit + let scale = depth_center * 0.011; // Weight the edges by their distance from the camera + edge_info = saturate((1.0 + bias) - edge_info / scale); // Apply the bias and scale, and invert edge_info so that small values become large, and vice versa + + // Pack the edge info into the texture + let edge_info_packed = vec4(pack4x8unorm(edge_info), 0u, 0u, 0u); + 
textureStore(depth_differences, pixel_coordinates, edge_info_packed); + + return depth_center; +} + +fn load_normal_view_space(uv: vec2) -> vec3 { + var world_normal = textureSampleLevel(normals, point_clamp_sampler, uv, 0.0).xyz; + world_normal = (world_normal * 2.0) - 1.0; + let view_from_world = mat3x3( + view.view_from_world[0].xyz, + view.view_from_world[1].xyz, + view.view_from_world[2].xyz, + ); + return view_from_world * world_normal; +} + +fn reconstruct_view_space_position(depth: f32, uv: vec2) -> vec3 { + let clip_xy = vec2(uv.x * 2.0 - 1.0, 1.0 - 2.0 * uv.y); + let t = view.view_from_clip * vec4(clip_xy, depth, 1.0); + let view_xyz = t.xyz / t.w; + return view_xyz; +} + +fn load_and_reconstruct_view_space_position(uv: vec2, sample_mip_level: f32) -> vec3 { + let depth = textureSampleLevel(preprocessed_depth, point_clamp_sampler, uv, sample_mip_level).r; + return reconstruct_view_space_position(depth, uv); +} + +@compute +@workgroup_size(8, 8, 1) +fn gtao(@builtin(global_invocation_id) global_id: vec3) { + let slice_count = f32(#SLICE_COUNT); + let samples_per_slice_side = f32(#SAMPLES_PER_SLICE_SIDE); + let effect_radius = 0.5 * 1.457; + let falloff_range = 0.615 * effect_radius; + let falloff_from = effect_radius * (1.0 - 0.615); + let falloff_mul = -1.0 / falloff_range; + let falloff_add = falloff_from / falloff_range + 1.0; + + let pixel_coordinates = vec2(global_id.xy); + let uv = (vec2(pixel_coordinates) + 0.5) / view.viewport.zw; + + var pixel_depth = calculate_neighboring_depth_differences(pixel_coordinates); + pixel_depth += 0.00001; // Avoid depth precision issues + + let pixel_position = reconstruct_view_space_position(pixel_depth, uv); + let pixel_normal = load_normal_view_space(uv); + let view_vec = normalize(-pixel_position); + + let noise = load_noise(pixel_coordinates); + let sample_scale = (-0.5 * effect_radius * view.clip_from_view[0][0]) / pixel_position.z; + + var visibility = 0.0; + for (var slice_t = 0.0; slice_t < slice_count; slice_t += 1.0) { + let slice = slice_t + noise.x; + let phi = (PI / slice_count) * slice; + let omega = vec2(cos(phi), sin(phi)); + + let direction = vec3(omega.xy, 0.0); + let orthographic_direction = direction - (dot(direction, view_vec) * view_vec); + let axis = cross(direction, view_vec); + let projected_normal = pixel_normal - axis * dot(pixel_normal, axis); + let projected_normal_length = length(projected_normal); + + let sign_norm = sign(dot(orthographic_direction, projected_normal)); + let cos_norm = saturate(dot(projected_normal, view_vec) / projected_normal_length); + let n = sign_norm * fast_acos(cos_norm); + + let min_cos_horizon_1 = cos(n + HALF_PI); + let min_cos_horizon_2 = cos(n - HALF_PI); + var cos_horizon_1 = min_cos_horizon_1; + var cos_horizon_2 = min_cos_horizon_2; + let sample_mul = vec2(omega.x, -omega.y) * sample_scale; + for (var sample_t = 0.0; sample_t < samples_per_slice_side; sample_t += 1.0) { + var sample_noise = (slice_t + sample_t * samples_per_slice_side) * 0.6180339887498948482; + sample_noise = fract(noise.y + sample_noise); + + var s = (sample_t + sample_noise) / samples_per_slice_side; + s *= s; // https://github.com/GameTechDev/XeGTAO#sample-distribution + let sample = s * sample_mul; + + // * view.viewport.zw gets us from [0, 1] to [0, viewport_size], which is needed for this to get the correct mip levels + let sample_mip_level = clamp(log2(length(sample * view.viewport.zw)) - 3.3, 0.0, 5.0); // https://github.com/GameTechDev/XeGTAO#memory-bandwidth-bottleneck + let sample_position_1 = 
load_and_reconstruct_view_space_position(uv + sample, sample_mip_level); + let sample_position_2 = load_and_reconstruct_view_space_position(uv - sample, sample_mip_level); + + let sample_difference_1 = sample_position_1 - pixel_position; + let sample_difference_2 = sample_position_2 - pixel_position; + let sample_distance_1 = length(sample_difference_1); + let sample_distance_2 = length(sample_difference_2); + var sample_cos_horizon_1 = dot(sample_difference_1 / sample_distance_1, view_vec); + var sample_cos_horizon_2 = dot(sample_difference_2 / sample_distance_2, view_vec); + + let weight_1 = saturate(sample_distance_1 * falloff_mul + falloff_add); + let weight_2 = saturate(sample_distance_2 * falloff_mul + falloff_add); + sample_cos_horizon_1 = mix(min_cos_horizon_1, sample_cos_horizon_1, weight_1); + sample_cos_horizon_2 = mix(min_cos_horizon_2, sample_cos_horizon_2, weight_2); + + cos_horizon_1 = max(cos_horizon_1, sample_cos_horizon_1); + cos_horizon_2 = max(cos_horizon_2, sample_cos_horizon_2); + } + + let horizon_1 = fast_acos(cos_horizon_1); + let horizon_2 = -fast_acos(cos_horizon_2); + let v1 = (cos_norm + 2.0 * horizon_1 * sin(n) - cos(2.0 * horizon_1 - n)) / 4.0; + let v2 = (cos_norm + 2.0 * horizon_2 * sin(n) - cos(2.0 * horizon_2 - n)) / 4.0; + visibility += projected_normal_length * (v1 + v2); + } + visibility /= slice_count; + visibility = clamp(visibility, 0.03, 1.0); + + textureStore(ambient_occlusion, pixel_coordinates, vec4(visibility, 0.0, 0.0, 0.0)); +} + +``` + +### bevy_shaders/shadow_sampling + +```rust +#define_import_path bevy_pbr::shadow_sampling + +#import bevy_pbr::{ + mesh_view_bindings as view_bindings, + utils::interleaved_gradient_noise, + utils, +} +#import bevy_render::maths::{orthonormalize, PI} + +// Do the lookup, using HW 2x2 PCF and comparison +fn sample_shadow_map_hardware(light_local: vec2, depth: f32, array_index: i32) -> f32 { +#ifdef NO_ARRAY_TEXTURES_SUPPORT + return textureSampleCompare( + view_bindings::directional_shadow_textures, + view_bindings::directional_shadow_textures_sampler, + light_local, + depth, + ); +#else + return textureSampleCompareLevel( + view_bindings::directional_shadow_textures, + view_bindings::directional_shadow_textures_sampler, + light_local, + array_index, + depth, + ); +#endif +} + +// Numbers determined by trial and error that gave nice results. +const SPOT_SHADOW_TEXEL_SIZE: f32 = 0.0134277345; +const POINT_SHADOW_SCALE: f32 = 0.003; +const POINT_SHADOW_TEMPORAL_OFFSET_SCALE: f32 = 0.5; + +// These are the standard MSAA sample point positions from D3D. They were chosen +// to get a reasonable distribution that's not too regular. +// +// https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_standard_multisample_quality_levels?redirectedfrom=MSDN +const D3D_SAMPLE_POINT_POSITIONS: array, 8> = array( + vec2( 0.125, -0.375), + vec2(-0.125, 0.375), + vec2( 0.625, 0.125), + vec2(-0.375, -0.625), + vec2(-0.625, 0.625), + vec2(-0.875, -0.125), + vec2( 0.375, 0.875), + vec2( 0.875, -0.875), +); + +// And these are the coefficients corresponding to the probability distribution +// function of a 2D Gaussian lobe with zero mean and the identity covariance +// matrix at those points. 
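+//
+// Illustrative note (not part of the upstream shader): each coefficient is
+// proportional to exp(-|p|^2 / 2) for the matching sample point above, with
+// the eight weights normalized to sum to 1. For example, the first two points
+// both sit at |p|^2 = 0.15625 and therefore share the largest weight,
+// 0.157112, while the farthest point (0.875, -0.875) gets the smallest,
+// 0.079001.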
+const D3D_SAMPLE_POINT_COEFFS: array = array( + 0.157112, + 0.157112, + 0.138651, + 0.130251, + 0.114946, + 0.114946, + 0.107982, + 0.079001, +); + +// https://web.archive.org/web/20230210095515/http://the-witness.net/news/2013/09/shadow-mapping-summary-part-1 +fn sample_shadow_map_castano_thirteen(light_local: vec2, depth: f32, array_index: i32) -> f32 { + let shadow_map_size = vec2(textureDimensions(view_bindings::directional_shadow_textures)); + let inv_shadow_map_size = 1.0 / shadow_map_size; + + let uv = light_local * shadow_map_size; + var base_uv = floor(uv + 0.5); + let s = (uv.x + 0.5 - base_uv.x); + let t = (uv.y + 0.5 - base_uv.y); + base_uv -= 0.5; + base_uv *= inv_shadow_map_size; + + let uw0 = (4.0 - 3.0 * s); + let uw1 = 7.0; + let uw2 = (1.0 + 3.0 * s); + + let u0 = (3.0 - 2.0 * s) / uw0 - 2.0; + let u1 = (3.0 + s) / uw1; + let u2 = s / uw2 + 2.0; + + let vw0 = (4.0 - 3.0 * t); + let vw1 = 7.0; + let vw2 = (1.0 + 3.0 * t); + + let v0 = (3.0 - 2.0 * t) / vw0 - 2.0; + let v1 = (3.0 + t) / vw1; + let v2 = t / vw2 + 2.0; + + var sum = 0.0; + + sum += uw0 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u0, v0) * inv_shadow_map_size), depth, array_index); + sum += uw1 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u1, v0) * inv_shadow_map_size), depth, array_index); + sum += uw2 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u2, v0) * inv_shadow_map_size), depth, array_index); + + sum += uw0 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u0, v1) * inv_shadow_map_size), depth, array_index); + sum += uw1 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u1, v1) * inv_shadow_map_size), depth, array_index); + sum += uw2 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u2, v1) * inv_shadow_map_size), depth, array_index); + + sum += uw0 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u0, v2) * inv_shadow_map_size), depth, array_index); + sum += uw1 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u1, v2) * inv_shadow_map_size), depth, array_index); + sum += uw2 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u2, v2) * inv_shadow_map_size), depth, array_index); + + return sum * (1.0 / 144.0); +} + +fn map(min1: f32, max1: f32, min2: f32, max2: f32, value: f32) -> f32 { + return min2 + (value - min1) * (max2 - min2) / (max1 - min1); +} + +// Creates a random rotation matrix using interleaved gradient noise. 
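+// The columns built below are (cos, -sin) and (sin, cos) of `random_angle`, i.e. a plain 2D
+// rotation; it rotates the fixed spiral sampling offsets differently per pixel and per frame,
+// trading visible banding for noise.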
+// +// See: https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare/ +fn random_rotation_matrix(scale: vec2) -> mat2x2 { + let random_angle = 2.0 * PI * interleaved_gradient_noise( + scale, view_bindings::globals.frame_count); + let m = vec2(sin(random_angle), cos(random_angle)); + return mat2x2( + m.y, -m.x, + m.x, m.y + ); +} + +fn sample_shadow_map_jimenez_fourteen(light_local: vec2, depth: f32, array_index: i32, texel_size: f32) -> f32 { + let shadow_map_size = vec2(textureDimensions(view_bindings::directional_shadow_textures)); + let rotation_matrix = random_rotation_matrix(light_local * shadow_map_size); + + // Empirically chosen fudge factor to make PCF look better across different CSM cascades + let f = map(0.00390625, 0.022949219, 0.015, 0.035, texel_size); + let uv_offset_scale = f / (texel_size * shadow_map_size); + + // https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) + let sample_offset0 = (rotation_matrix * utils::SPIRAL_OFFSET_0_) * uv_offset_scale; + let sample_offset1 = (rotation_matrix * utils::SPIRAL_OFFSET_1_) * uv_offset_scale; + let sample_offset2 = (rotation_matrix * utils::SPIRAL_OFFSET_2_) * uv_offset_scale; + let sample_offset3 = (rotation_matrix * utils::SPIRAL_OFFSET_3_) * uv_offset_scale; + let sample_offset4 = (rotation_matrix * utils::SPIRAL_OFFSET_4_) * uv_offset_scale; + let sample_offset5 = (rotation_matrix * utils::SPIRAL_OFFSET_5_) * uv_offset_scale; + let sample_offset6 = (rotation_matrix * utils::SPIRAL_OFFSET_6_) * uv_offset_scale; + let sample_offset7 = (rotation_matrix * utils::SPIRAL_OFFSET_7_) * uv_offset_scale; + + var sum = 0.0; + sum += sample_shadow_map_hardware(light_local + sample_offset0, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset1, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset2, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset3, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset4, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset5, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset6, depth, array_index); + sum += sample_shadow_map_hardware(light_local + sample_offset7, depth, array_index); + return sum / 8.0; +} + +fn sample_shadow_map(light_local: vec2, depth: f32, array_index: i32, texel_size: f32) -> f32 { +#ifdef SHADOW_FILTER_METHOD_GAUSSIAN + return sample_shadow_map_castano_thirteen(light_local, depth, array_index); +#else ifdef SHADOW_FILTER_METHOD_TEMPORAL + return sample_shadow_map_jimenez_fourteen(light_local, depth, array_index, texel_size); +#else ifdef SHADOW_FILTER_METHOD_HARDWARE_2X2 + return sample_shadow_map_hardware(light_local, depth, array_index); +#else + // This needs a default return value to avoid shader compilation errors if it's compiled with no SHADOW_FILTER_METHOD_* defined. + // (eg. if the normal prepass is enabled it ends up compiling this due to the normal prepass depending on pbr_functions, which depends on shadows) + // This should never actually get used, as anyone using bevy's lighting/shadows should always have a SHADOW_FILTER_METHOD defined. + // Set to 0 to make it obvious that something is wrong. 
+ return 0.0; +#endif +} + +// NOTE: Due to the non-uniform control flow in `shadows::fetch_point_shadow`, +// we must use the Level variant of textureSampleCompare to avoid undefined +// behavior due to some of the fragments in a quad (2x2 fragments) being +// processed not being sampled, and this messing with mip-mapping functionality. +// The shadow maps have no mipmaps so Level just samples from LOD 0. +fn sample_shadow_cubemap_hardware(light_local: vec3, depth: f32, light_id: u32) -> f32 { +#ifdef NO_CUBE_ARRAY_TEXTURES_SUPPORT + return textureSampleCompare(view_bindings::point_shadow_textures, view_bindings::point_shadow_textures_sampler, light_local, depth); +#else + return textureSampleCompareLevel(view_bindings::point_shadow_textures, view_bindings::point_shadow_textures_sampler, light_local, i32(light_id), depth); +#endif +} + +fn sample_shadow_cubemap_at_offset( + position: vec2, + coeff: f32, + x_basis: vec3, + y_basis: vec3, + light_local: vec3, + depth: f32, + light_id: u32, +) -> f32 { + return sample_shadow_cubemap_hardware( + light_local + position.x * x_basis + position.y * y_basis, + depth, + light_id + ) * coeff; +} + +// This more or less does what Castano13 does, but in 3D space. Castano13 is +// essentially an optimized 2D Gaussian filter that takes advantage of the +// bilinear filtering hardware to reduce the number of samples needed. This +// trick doesn't apply to cubemaps, so we manually apply a Gaussian filter over +// the standard 8xMSAA pattern instead. +fn sample_shadow_cubemap_gaussian( + light_local: vec3, + depth: f32, + scale: f32, + distance_to_light: f32, + light_id: u32, +) -> f32 { + // Create an orthonormal basis so we can apply a 2D sampling pattern to a + // cubemap. + var up = vec3(0.0, 1.0, 0.0); + if (dot(up, normalize(light_local)) > 0.99) { + up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis. + } + let basis = orthonormalize(light_local, up) * scale * distance_to_light; + + var sum: f32 = 0.0; + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[0], D3D_SAMPLE_POINT_COEFFS[0], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[1], D3D_SAMPLE_POINT_COEFFS[1], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[2], D3D_SAMPLE_POINT_COEFFS[2], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[3], D3D_SAMPLE_POINT_COEFFS[3], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[4], D3D_SAMPLE_POINT_COEFFS[4], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[5], D3D_SAMPLE_POINT_COEFFS[5], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[6], D3D_SAMPLE_POINT_COEFFS[6], + basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + D3D_SAMPLE_POINT_POSITIONS[7], D3D_SAMPLE_POINT_COEFFS[7], + basis[0], basis[1], light_local, depth, light_id); + return sum; +} + +// This is a port of the Jimenez14 filter above to the 3D space. It jitters the +// points in the spiral pattern after first creating a 2D orthonormal basis +// along the principal light direction. 
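+// Every tap below uses the same 1/8 weight (a plain average rather than the Gaussian weights
+// used above); the leftover noise is intended to be resolved temporally, e.g. under TAA.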
+fn sample_shadow_cubemap_temporal( + light_local: vec3, + depth: f32, + scale: f32, + distance_to_light: f32, + light_id: u32, +) -> f32 { + // Create an orthonormal basis so we can apply a 2D sampling pattern to a + // cubemap. + var up = vec3(0.0, 1.0, 0.0); + if (dot(up, normalize(light_local)) > 0.99) { + up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis. + } + let basis = orthonormalize(light_local, up) * scale * distance_to_light; + + let rotation_matrix = random_rotation_matrix(vec2(1.0)); + + let sample_offset0 = rotation_matrix * utils::SPIRAL_OFFSET_0_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset1 = rotation_matrix * utils::SPIRAL_OFFSET_1_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset2 = rotation_matrix * utils::SPIRAL_OFFSET_2_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset3 = rotation_matrix * utils::SPIRAL_OFFSET_3_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset4 = rotation_matrix * utils::SPIRAL_OFFSET_4_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset5 = rotation_matrix * utils::SPIRAL_OFFSET_5_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset6 = rotation_matrix * utils::SPIRAL_OFFSET_6_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + let sample_offset7 = rotation_matrix * utils::SPIRAL_OFFSET_7_ * + POINT_SHADOW_TEMPORAL_OFFSET_SCALE; + + var sum: f32 = 0.0; + sum += sample_shadow_cubemap_at_offset( + sample_offset0, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset1, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset2, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset3, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset4, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset5, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset6, 0.125, basis[0], basis[1], light_local, depth, light_id); + sum += sample_shadow_cubemap_at_offset( + sample_offset7, 0.125, basis[0], basis[1], light_local, depth, light_id); + return sum; +} + +fn sample_shadow_cubemap( + light_local: vec3, + distance_to_light: f32, + depth: f32, + light_id: u32, +) -> f32 { +#ifdef SHADOW_FILTER_METHOD_GAUSSIAN + return sample_shadow_cubemap_gaussian( + light_local, depth, POINT_SHADOW_SCALE, distance_to_light, light_id); +#else ifdef SHADOW_FILTER_METHOD_TEMPORAL + return sample_shadow_cubemap_temporal( + light_local, depth, POINT_SHADOW_SCALE, distance_to_light, light_id); +#else ifdef SHADOW_FILTER_METHOD_HARDWARE_2X2 + return sample_shadow_cubemap_hardware(light_local, depth, light_id); +#else + // This needs a default return value to avoid shader compilation errors if it's compiled with no SHADOW_FILTER_METHOD_* defined. + // (eg. if the normal prepass is enabled it ends up compiling this due to the normal prepass depending on pbr_functions, which depends on shadows) + // This should never actually get used, as anyone using bevy's lighting/shadows should always have a SHADOW_FILTER_METHOD defined. + // Set to 0 to make it obvious that something is wrong. 
+ return 0.0; +#endif +} + +``` + +### bevy_shaders/custom_ui_material + +```rust +// This shader draws a circle with a given input color +#import bevy_ui::ui_vertex_output::UiVertexOutput + +@group(1) @binding(0) var color: vec4; +@group(1) @binding(1) var slider: f32; +@group(1) @binding(2) var material_color_texture: texture_2d; +@group(1) @binding(3) var material_color_sampler: sampler; + + +@fragment +fn fragment(in: UiVertexOutput) -> @location(0) vec4 { + if in.uv.x < slider { + let output_color = textureSample(material_color_texture, material_color_sampler, in.uv) * color; + return output_color; + } else { + return vec4(0.0); + } +} + +``` + +### bevy_shaders/ui + +```rust +#import bevy_render::view::View + +const TEXTURED = 1u; +const RIGHT_VERTEX = 2u; +const BOTTOM_VERTEX = 4u; +const BORDER: u32 = 8u; + +fn enabled(flags: u32, mask: u32) -> bool { + return (flags & mask) != 0u; +} + +@group(0) @binding(0) var view: View; + +struct VertexOutput { + @location(0) uv: vec2, + @location(1) color: vec4, + + @location(2) @interpolate(flat) size: vec2, + @location(3) @interpolate(flat) flags: u32, + @location(4) @interpolate(flat) radius: vec4, + @location(5) @interpolate(flat) border: vec4, + + // Position relative to the center of the rectangle. + @location(6) point: vec2, + @builtin(position) position: vec4, +}; + +@vertex +fn vertex( + @location(0) vertex_position: vec3, + @location(1) vertex_uv: vec2, + @location(2) vertex_color: vec4, + @location(3) flags: u32, + + // x: top left, y: top right, z: bottom right, w: bottom left. + @location(4) radius: vec4, + + // x: left, y: top, z: right, w: bottom. + @location(5) border: vec4, + @location(6) size: vec2, +) -> VertexOutput { + var out: VertexOutput; + out.uv = vertex_uv; + out.position = view.clip_from_world * vec4(vertex_position, 1.0); + out.color = vertex_color; + out.flags = flags; + out.radius = radius; + out.size = size; + out.border = border; + var point = 0.49999 * size; + if (flags & RIGHT_VERTEX) == 0u { + point.x *= -1.; + } + if (flags & BOTTOM_VERTEX) == 0u { + point.y *= -1.; + } + out.point = point; + + return out; +} + +@group(1) @binding(0) var sprite_texture: texture_2d; +@group(1) @binding(1) var sprite_sampler: sampler; + +// The returned value is the shortest distance from the given point to the boundary of the rounded +// box. +// +// Negative values indicate that the point is inside the rounded box, positive values that the point +// is outside, and zero is exactly on the boundary. +// +// Arguments: +// - `point` -> The function will return the distance from this point to the closest point on +// the boundary. +// - `size` -> The maximum width and height of the box. +// - `corner_radii` -> The radius of each rounded corner. Ordered counter clockwise starting +// top left: +// x: top left, y: top right, z: bottom right, w: bottom left. +fn sd_rounded_box(point: vec2, size: vec2, corner_radii: vec4) -> f32 { + // If 0.0 < y then select bottom left (w) and bottom right corner radius (z). + // Else select top left (x) and top right corner radius (y). + let rs = select(corner_radii.xy, corner_radii.wz, 0.0 < point.y); + // w and z are swapped above so that both pairs are in left to right order, otherwise this second + // select statement would return the incorrect value for the bottom pair. + let radius = select(rs.x, rs.y, 0.0 < point.x); + // Vector from the corner closest to the point, to the point. 
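+    // (abs(point) mirrors the point into the positive quadrant, so only the single corner
+    // whose radius was selected above ever needs to be considered.)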
+ let corner_to_point = abs(point) - 0.5 * size; + // Vector from the center of the radius circle to the point. + let q = corner_to_point + radius; + // Length from center of the radius circle to the point, zeros a component if the point is not + // within the quadrant of the radius circle that is part of the curved corner. + let l = length(max(q, vec2(0.0))); + let m = min(max(q.x, q.y), 0.0); + return l + m - radius; +} + +fn sd_inset_rounded_box(point: vec2, size: vec2, radius: vec4, inset: vec4) -> f32 { + let inner_size = size - inset.xy - inset.zw; + let inner_center = inset.xy + 0.5 * inner_size - 0.5 * size; + let inner_point = point - inner_center; + + var r = radius; + + // Top left corner. + r.x = r.x - max(inset.x, inset.y); + + // Top right corner. + r.y = r.y - max(inset.z, inset.y); + + // Bottom right corner. + r.z = r.z - max(inset.z, inset.w); + + // Bottom left corner. + r.w = r.w - max(inset.x, inset.w); + + let half_size = inner_size * 0.5; + let min_size = min(half_size.x, half_size.y); + + r = min(max(r, vec4(0.0)), vec4(min_size)); + + return sd_rounded_box(inner_point, inner_size, r); +} + +// get alpha for antialiasing for sdf +fn antialias(distance: f32) -> f32 { + // Using the fwidth(distance) was causing artifacts, so just use the distance. + // This antialiases between the distance values of 0.25 and -0.25 + return clamp(0.0, 1.0, 0.5 - 2.0 * distance); +} + +fn draw(in: VertexOutput, texture_color: vec4) -> vec4 { + // Only use the color sampled from the texture if the `TEXTURED` flag is enabled. + // This allows us to draw both textured and untextured shapes together in the same batch. + let color = select(in.color, in.color * texture_color, enabled(in.flags, TEXTURED)); + + // Signed distances. The magnitude is the distance of the point from the edge of the shape. + // * Negative values indicate that the point is inside the shape. + // * Zero values indicate the point is on the edge of the shape. + // * Positive values indicate the point is outside the shape. + + // Signed distance from the exterior boundary. + let external_distance = sd_rounded_box(in.point, in.size, in.radius); + + // Signed distance from the border's internal edge (the signed distance is negative if the point + // is inside the rect but not on the border). + // If the border size is set to zero, this is the same as the external distance. + let internal_distance = sd_inset_rounded_box(in.point, in.size, in.radius, in.border); + + // Signed distance from the border (the intersection of the rect with its border). + // Points inside the border have negative signed distance. Any point outside the border, whether + // outside the outside edge, or inside the inner edge have positive signed distance. + let border_distance = max(external_distance, -internal_distance); + + // At external edges with no border, `border_distance` is equal to zero. + // This select statement ensures we only perform anti-aliasing where a non-zero width border + // is present, otherwise an outline about the external boundary would be drawn even without + // a border. + let t = select(1.0 - step(0.0, border_distance), antialias(border_distance), external_distance < internal_distance); + + // Blend mode ALPHA_BLENDING is used for UI elements, so we don't premultiply alpha here. 
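+    // `t` acts as coverage: ~1.0 well inside the border, ~0.0 well outside, with a short
+    // anti-aliased ramp across the boundary.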
+ return vec4(color.rgb, saturate(color.a * t)); +} + +fn draw_background(in: VertexOutput, texture_color: vec4) -> vec4 { + let color = select(in.color, in.color * texture_color, enabled(in.flags, TEXTURED)); + + // When drawing the background only draw the internal area and not the border. + let internal_distance = sd_inset_rounded_box(in.point, in.size, in.radius, in.border); + let t = antialias(internal_distance); + return vec4(color.rgb, saturate(color.a * t)); +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let texture_color = textureSample(sprite_texture, sprite_sampler, in.uv); + + if enabled(in.flags, BORDER) { + return draw(in, texture_color); + } else { + return draw_background(in, texture_color); + } +} + +``` + +### bevy_shaders/mesh2d_view_types + +```rust +#define_import_path bevy_sprite::mesh2d_view_types + +#import bevy_render::view +#import bevy_render::globals + +``` + +### bevy_shaders/mesh2d_types + +```rust +#define_import_path bevy_sprite::mesh2d_types + +struct Mesh2d { + // Affine 4x3 matrix transposed to 3x4 + // Use bevy_render::maths::affine3_to_square to unpack + world_from_local: mat3x4, + // 3x3 matrix packed in mat2x4 and f32 as: + // [0].xyz, [1].x, + // [1].yz, [2].xy + // [2].z + // Use bevy_render::maths::mat2x4_f32_to_mat3x3_unpack to unpack + local_from_world_transpose_a: mat2x4, + local_from_world_transpose_b: f32, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. + flags: u32, +}; + +``` + +### bevy_shaders/array_texture + +```rust +#import bevy_pbr::{ + forward_io::VertexOutput, + mesh_view_bindings::view, + pbr_types::{STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new}, + pbr_functions as fns, + pbr_bindings, +} +#import bevy_core_pipeline::tonemapping::tone_mapping + +@group(2) @binding(0) var my_array_texture: texture_2d_array; +@group(2) @binding(1) var my_array_texture_sampler: sampler; + +@fragment +fn fragment( + @builtin(front_facing) is_front: bool, + mesh: VertexOutput, +) -> @location(0) vec4 { + let layer = i32(mesh.world_position.x) & 0x3; + + // Prepare a 'processed' StandardMaterial by sampling all textures to resolve + // the material members + var pbr_input: PbrInput = pbr_input_new(); + + pbr_input.material.base_color = textureSample(my_array_texture, my_array_texture_sampler, mesh.uv, layer); +#ifdef VERTEX_COLORS + pbr_input.material.base_color = pbr_input.material.base_color * mesh.color; +#endif + + let double_sided = (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; + + pbr_input.frag_coord = mesh.position; + pbr_input.world_position = mesh.world_position; + pbr_input.world_normal = fns::prepare_world_normal( + mesh.world_normal, + double_sided, + is_front, + ); + + pbr_input.is_orthographic = view.clip_from_view[3].w == 1.0; + + pbr_input.N = normalize(pbr_input.world_normal); + +#ifdef VERTEX_TANGENTS + let Nt = textureSampleBias(pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, mesh.uv, view.mip_bias).rgb; + let TBN = fns::calculate_tbn_mikktspace(mesh.world_normal, mesh.world_tangent); + pbr_input.N = fns::apply_normal_mapping( + pbr_input.material.flags, + TBN, + double_sided, + is_front, + Nt, + ); +#endif + + pbr_input.V = fns::calculate_view(mesh.world_position, pbr_input.is_orthographic); + + return tone_mapping(fns::apply_pbr_lighting(pbr_input), view.color_grading); +} + +``` + +### bevy_shaders/fullscreen + +```rust +#define_import_path bevy_core_pipeline::fullscreen_vertex_shader + 
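+// Clip-space position plus a UV that covers the render target with [0, 1]^2, produced by the
+// single-triangle fullscreen vertex shader below.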
+struct FullscreenVertexOutput { + @builtin(position) + position: vec4, + @location(0) + uv: vec2, +}; + +// This vertex shader produces the following, when drawn using indices 0..3: +// +// 1 | 0-----x.....2 +// 0 | | s | . ´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | 1´ +// +--------------- +// -1 0 1 2 3 +// +// The axes are clip-space x and y. The region marked s is the visible region. +// The digits in the corners of the right-angled triangle are the vertex +// indices. +// +// The top-left has UV 0,0, the bottom-left has 0,2, and the top-right has 2,0. +// This means that the UV gets interpolated to 1,1 at the bottom-right corner +// of the clip-space rectangle that is at 1,-1 in clip space. +@vertex +fn fullscreen_vertex_shader(@builtin(vertex_index) vertex_index: u32) -> FullscreenVertexOutput { + // See the explanation above for how this works + let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; + let clip_position = vec4(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0), 0.0, 1.0); + + return FullscreenVertexOutput(clip_position, uv); +} + +``` + +### bevy_shaders/view + +```rust +#define_import_path bevy_render::view + +struct ColorGrading { + balance: mat3x3, + saturation: vec3, + contrast: vec3, + gamma: vec3, + gain: vec3, + lift: vec3, + midtone_range: vec2, + exposure: f32, + hue: f32, + post_saturation: f32, +} + +struct View { + clip_from_world: mat4x4, + unjittered_clip_from_world: mat4x4, + world_from_clip: mat4x4, + world_from_view: mat4x4, + view_from_world: mat4x4, + clip_from_view: mat4x4, + view_from_clip: mat4x4, + world_position: vec3, + exposure: f32, + // viewport(x_origin, y_origin, width, height) + viewport: vec4, + frustum: array, 6>, + color_grading: ColorGrading, + mip_bias: f32, +}; + +``` + +### bevy_shaders/pbr_bindings + +```rust +#define_import_path bevy_pbr::pbr_bindings + +#import bevy_pbr::pbr_types::StandardMaterial + +@group(2) @binding(0) var material: StandardMaterial; +@group(2) @binding(1) var base_color_texture: texture_2d; +@group(2) @binding(2) var base_color_sampler: sampler; +@group(2) @binding(3) var emissive_texture: texture_2d; +@group(2) @binding(4) var emissive_sampler: sampler; +@group(2) @binding(5) var metallic_roughness_texture: texture_2d; +@group(2) @binding(6) var metallic_roughness_sampler: sampler; +@group(2) @binding(7) var occlusion_texture: texture_2d; +@group(2) @binding(8) var occlusion_sampler: sampler; +@group(2) @binding(9) var normal_map_texture: texture_2d; +@group(2) @binding(10) var normal_map_sampler: sampler; +@group(2) @binding(11) var depth_map_texture: texture_2d; +@group(2) @binding(12) var depth_map_sampler: sampler; +#ifdef PBR_ANISOTROPY_TEXTURE_SUPPORTED +@group(2) @binding(13) var anisotropy_texture: texture_2d; +@group(2) @binding(14) var anisotropy_sampler: sampler; +#endif +#ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED +@group(2) @binding(15) var specular_transmission_texture: texture_2d; +@group(2) @binding(16) var specular_transmission_sampler: sampler; +@group(2) @binding(17) var thickness_texture: texture_2d; +@group(2) @binding(18) var thickness_sampler: sampler; +@group(2) @binding(19) var diffuse_transmission_texture: texture_2d; +@group(2) @binding(20) var diffuse_transmission_sampler: sampler; +#endif +#ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED +@group(2) @binding(21) var clearcoat_texture: texture_2d; +@group(2) @binding(22) var clearcoat_sampler: sampler; +@group(2) @binding(23) var clearcoat_roughness_texture: texture_2d; +@group(2) @binding(24) var 
clearcoat_roughness_sampler: sampler; +@group(2) @binding(25) var clearcoat_normal_texture: texture_2d; +@group(2) @binding(26) var clearcoat_normal_sampler: sampler; +#endif + +``` + +### bevy_shaders/texture_binding_array + +```rust +#import bevy_pbr::forward_io::VertexOutput + +@group(2) @binding(0) var textures: binding_array>; +@group(2) @binding(1) var nearest_sampler: sampler; +// We can also have array of samplers +// var samplers: binding_array; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + // Select the texture to sample from using non-uniform uv coordinates + let coords = clamp(vec2(mesh.uv * 4.0), vec2(0u), vec2(3u)); + let index = coords.y * 4u + coords.x; + let inner_uv = fract(mesh.uv * 4.0); + return textureSample(textures[index], nearest_sampler, inner_uv); +} + +``` + +### bevy_shaders/fill_cluster_buffers + +```rust +#import bevy_pbr::meshlet_bindings::{ + cluster_count, + meshlet_instance_meshlet_counts_prefix_sum, + meshlet_instance_meshlet_slice_starts, + meshlet_cluster_instance_ids, + meshlet_cluster_meshlet_ids, +} + +/// Writes out instance_id and meshlet_id to the global buffers for each cluster in the scene. + +@compute +@workgroup_size(128, 1, 1) // 128 threads per workgroup, 1 cluster per thread +fn fill_cluster_buffers( + @builtin(workgroup_id) workgroup_id: vec3, + @builtin(num_workgroups) num_workgroups: vec3, + @builtin(local_invocation_id) local_invocation_id: vec3 +) { + // Calculate the cluster ID for this thread + let cluster_id = local_invocation_id.x + 128u * dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x, num_workgroups.x, 1u)); + if cluster_id >= cluster_count { return; } + + // Binary search to find the instance this cluster belongs to + var left = 0u; + var right = arrayLength(&meshlet_instance_meshlet_counts_prefix_sum) - 1u; + while left <= right { + let mid = (left + right) / 2u; + if meshlet_instance_meshlet_counts_prefix_sum[mid] <= cluster_id { + left = mid + 1u; + } else { + right = mid - 1u; + } + } + let instance_id = right; + + // Find the meshlet ID for this cluster within the instance's MeshletMesh + let meshlet_id_local = cluster_id - meshlet_instance_meshlet_counts_prefix_sum[instance_id]; + + // Find the overall meshlet ID in the global meshlet buffer + let meshlet_id = meshlet_id_local + meshlet_instance_meshlet_slice_starts[instance_id]; + + // Write results to buffers + meshlet_cluster_instance_ids[cluster_id] = instance_id; + meshlet_cluster_meshlet_ids[cluster_id] = meshlet_id; +} + +``` + +### bevy_shaders/blit + +```rust +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var in_texture: texture_2d; +@group(0) @binding(1) var in_sampler: sampler; + +@fragment +fn fs_main(in: FullscreenVertexOutput) -> @location(0) vec4 { + return textureSample(in_texture, in_sampler, in.uv); +} + +``` + +### bevy_shaders/fxaa + +```rust +// NVIDIA FXAA 3.11 +// Original source code by TIMOTHY LOTTES +// https://gist.github.com/kosua20/0c506b81b3812ac900048059d2383126 +// +// Cleaned version - https://github.com/kosua20/Rendu/blob/master/resources/common/shaders/screens/fxaa.frag +// +// Tweaks by mrDIMAS - https://github.com/FyroxEngine/Fyrox/blob/master/src/renderer/shaders/fxaa_fs.glsl + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var screenTexture: texture_2d; +@group(0) @binding(1) var samp: sampler; + +// Trims the algorithm from processing darks. 
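+// Pixels whose local luma range stays below this floor are returned unchanged by the early-out
+// in `fragment` below, which keeps FXAA from spending work on near-black regions.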
+#ifdef EDGE_THRESH_MIN_LOW + const EDGE_THRESHOLD_MIN: f32 = 0.0833; +#endif + +#ifdef EDGE_THRESH_MIN_MEDIUM + const EDGE_THRESHOLD_MIN: f32 = 0.0625; +#endif + +#ifdef EDGE_THRESH_MIN_HIGH + const EDGE_THRESHOLD_MIN: f32 = 0.0312; +#endif + +#ifdef EDGE_THRESH_MIN_ULTRA + const EDGE_THRESHOLD_MIN: f32 = 0.0156; +#endif + +#ifdef EDGE_THRESH_MIN_EXTREME + const EDGE_THRESHOLD_MIN: f32 = 0.0078; +#endif + +// The minimum amount of local contrast required to apply algorithm. +#ifdef EDGE_THRESH_LOW + const EDGE_THRESHOLD_MAX: f32 = 0.250; +#endif + +#ifdef EDGE_THRESH_MEDIUM + const EDGE_THRESHOLD_MAX: f32 = 0.166; +#endif + +#ifdef EDGE_THRESH_HIGH + const EDGE_THRESHOLD_MAX: f32 = 0.125; +#endif + +#ifdef EDGE_THRESH_ULTRA + const EDGE_THRESHOLD_MAX: f32 = 0.063; +#endif + +#ifdef EDGE_THRESH_EXTREME + const EDGE_THRESHOLD_MAX: f32 = 0.031; +#endif + +const ITERATIONS: i32 = 12; //default is 12 +const SUBPIXEL_QUALITY: f32 = 0.75; +// #define QUALITY(q) ((q) < 5 ? 1.0 : ((q) > 5 ? ((q) < 10 ? 2.0 : ((q) < 11 ? 4.0 : 8.0)) : 1.5)) +fn QUALITY(q: i32) -> f32 { + switch (q) { + //case 0, 1, 2, 3, 4: { return 1.0; } + default: { return 1.0; } + case 5: { return 1.5; } + case 6, 7, 8, 9: { return 2.0; } + case 10: { return 4.0; } + case 11: { return 8.0; } + } +} + +fn rgb2luma(rgb: vec3) -> f32 { + return sqrt(dot(rgb, vec3(0.299, 0.587, 0.114))); +} + +// Performs FXAA post-process anti-aliasing as described in the Nvidia FXAA white paper and the associated shader code. +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + let resolution = vec2(textureDimensions(screenTexture)); + let inverseScreenSize = 1.0 / resolution.xy; + let texCoord = in.position.xy * inverseScreenSize; + + let centerSample = textureSampleLevel(screenTexture, samp, texCoord, 0.0); + let colorCenter = centerSample.rgb; + + // Luma at the current fragment + let lumaCenter = rgb2luma(colorCenter); + + // Luma at the four direct neighbors of the current fragment. + let lumaDown = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(0, -1)).rgb); + let lumaUp = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(0, 1)).rgb); + let lumaLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, 0)).rgb); + let lumaRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, 0)).rgb); + + // Find the maximum and minimum luma around the current fragment. + let lumaMin = min(lumaCenter, min(min(lumaDown, lumaUp), min(lumaLeft, lumaRight))); + let lumaMax = max(lumaCenter, max(max(lumaDown, lumaUp), max(lumaLeft, lumaRight))); + + // Compute the delta. + let lumaRange = lumaMax - lumaMin; + + // If the luma variation is lower that a threshold (or if we are in a really dark area), we are not on an edge, don't perform any AA. + if (lumaRange < max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) { + return centerSample; + } + + // Query the 4 remaining corners lumas. + let lumaDownLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, -1)).rgb); + let lumaUpRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, 1)).rgb); + let lumaUpLeft = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(-1, 1)).rgb); + let lumaDownRight = rgb2luma(textureSampleLevel(screenTexture, samp, texCoord, 0.0, vec2(1, -1)).rgb); + + // Combine the four edges lumas (using intermediary variables for future computations with the same values). 
+    let lumaDownUp = lumaDown + lumaUp;
+    let lumaLeftRight = lumaLeft + lumaRight;
+
+    // Same for corners
+    let lumaLeftCorners = lumaDownLeft + lumaUpLeft;
+    let lumaDownCorners = lumaDownLeft + lumaDownRight;
+    let lumaRightCorners = lumaDownRight + lumaUpRight;
+    let lumaUpCorners = lumaUpRight + lumaUpLeft;
+
+    // Compute an estimation of the gradient along the horizontal and vertical axis.
+    let edgeHorizontal = abs(-2.0 * lumaLeft + lumaLeftCorners) +
+        abs(-2.0 * lumaCenter + lumaDownUp) * 2.0 +
+        abs(-2.0 * lumaRight + lumaRightCorners);
+
+    let edgeVertical = abs(-2.0 * lumaUp + lumaUpCorners) +
+        abs(-2.0 * lumaCenter + lumaLeftRight) * 2.0 +
+        abs(-2.0 * lumaDown + lumaDownCorners);
+
+    // Is the local edge horizontal or vertical?
+    let isHorizontal = (edgeHorizontal >= edgeVertical);
+
+    // Choose the step size (one pixel) accordingly.
+    var stepLength = select(inverseScreenSize.x, inverseScreenSize.y, isHorizontal);
+
+    // Select the two neighboring texels' lumas in the opposite direction to the local edge.
+    var luma1 = select(lumaLeft, lumaDown, isHorizontal);
+    var luma2 = select(lumaRight, lumaUp, isHorizontal);
+
+    // Compute gradients in this direction.
+    let gradient1 = luma1 - lumaCenter;
+    let gradient2 = luma2 - lumaCenter;
+
+    // Which direction is the steepest?
+    let is1Steepest = abs(gradient1) >= abs(gradient2);
+
+    // Gradient in the corresponding direction, normalized.
+    let gradientScaled = 0.25 * max(abs(gradient1), abs(gradient2));
+
+    // Average luma in the correct direction.
+    var lumaLocalAverage = 0.0;
+    if (is1Steepest) {
+        // Switch the direction
+        stepLength = -stepLength;
+        lumaLocalAverage = 0.5 * (luma1 + lumaCenter);
+    } else {
+        lumaLocalAverage = 0.5 * (luma2 + lumaCenter);
+    }
+
+    // Shift UV in the correct direction by half a pixel.
+    // Compute offset (for each iteration step) in the right direction.
+    var currentUv = texCoord;
+    var offset = vec2(0.0, 0.0);
+    if (isHorizontal) {
+        currentUv.y = currentUv.y + stepLength * 0.5;
+        offset.x = inverseScreenSize.x;
+    } else {
+        currentUv.x = currentUv.x + stepLength * 0.5;
+        offset.y = inverseScreenSize.y;
+    }
+
+    // Compute UVs to explore on each side of the edge, orthogonally. The QUALITY allows us to step faster.
+    var uv1 = currentUv - offset; // * QUALITY(0); // (quality 0 is 1.0)
+    var uv2 = currentUv + offset; // * QUALITY(0); // (quality 0 is 1.0)
+
+    // Read the lumas at both current extremities of the exploration segment, and compute the deltas with respect to the local average luma.
+    var lumaEnd1 = rgb2luma(textureSampleLevel(screenTexture, samp, uv1, 0.0).rgb);
+    var lumaEnd2 = rgb2luma(textureSampleLevel(screenTexture, samp, uv2, 0.0).rgb);
+    lumaEnd1 = lumaEnd1 - lumaLocalAverage;
+    lumaEnd2 = lumaEnd2 - lumaLocalAverage;
+
+    // If the luma deltas at the current extremities are larger than the local gradient, we have reached the side of the edge.
+    var reached1 = abs(lumaEnd1) >= gradientScaled;
+    var reached2 = abs(lumaEnd2) >= gradientScaled;
+    var reachedBoth = reached1 && reached2;
+
+    // If the side is not reached, we continue to explore in this direction.
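+    // Note: WGSL `select(f, t, cond)` returns `t` when `cond` is true, so each end point only
+    // keeps stepping outward while its side of the edge is still unreached.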
+ uv1 = select(uv1 - offset, uv1, reached1); // * QUALITY(1); // (quality 1 is 1.0) + uv2 = select(uv2 - offset, uv2, reached2); // * QUALITY(1); // (quality 1 is 1.0) + + // If both sides have not been reached, continue to explore. + if (!reachedBoth) { + for (var i: i32 = 2; i < ITERATIONS; i = i + 1) { + // If needed, read luma in 1st direction, compute delta. + if (!reached1) { + lumaEnd1 = rgb2luma(textureSampleLevel(screenTexture, samp, uv1, 0.0).rgb); + lumaEnd1 = lumaEnd1 - lumaLocalAverage; + } + // If needed, read luma in opposite direction, compute delta. + if (!reached2) { + lumaEnd2 = rgb2luma(textureSampleLevel(screenTexture, samp, uv2, 0.0).rgb); + lumaEnd2 = lumaEnd2 - lumaLocalAverage; + } + // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. + reached1 = abs(lumaEnd1) >= gradientScaled; + reached2 = abs(lumaEnd2) >= gradientScaled; + reachedBoth = reached1 && reached2; + + // If the side is not reached, we continue to explore in this direction, with a variable quality. + if (!reached1) { + uv1 = uv1 - offset * QUALITY(i); + } + if (!reached2) { + uv2 = uv2 + offset * QUALITY(i); + } + + // If both sides have been reached, stop the exploration. + if (reachedBoth) { + break; + } + } + } + + // Compute the distances to each side edge of the edge (!). + var distance1 = select(texCoord.y - uv1.y, texCoord.x - uv1.x, isHorizontal); + var distance2 = select(uv2.y - texCoord.y, uv2.x - texCoord.x, isHorizontal); + + // In which direction is the side of the edge closer ? + let isDirection1 = distance1 < distance2; + let distanceFinal = min(distance1, distance2); + + // Thickness of the edge. + let edgeThickness = (distance1 + distance2); + + // Is the luma at center smaller than the local average ? + let isLumaCenterSmaller = lumaCenter < lumaLocalAverage; + + // If the luma at center is smaller than at its neighbor, the delta luma at each end should be positive (same variation). + let correctVariation1 = (lumaEnd1 < 0.0) != isLumaCenterSmaller; + let correctVariation2 = (lumaEnd2 < 0.0) != isLumaCenterSmaller; + + // Only keep the result in the direction of the closer side of the edge. + var correctVariation = select(correctVariation2, correctVariation1, isDirection1); + + // UV offset: read in the direction of the closest side of the edge. + let pixelOffset = - distanceFinal / edgeThickness + 0.5; + + // If the luma variation is incorrect, do not offset. + var finalOffset = select(0.0, pixelOffset, correctVariation); + + // Sub-pixel shifting + // Full weighted average of the luma over the 3x3 neighborhood. + let lumaAverage = (1.0 / 12.0) * (2.0 * (lumaDownUp + lumaLeftRight) + lumaLeftCorners + lumaRightCorners); + // Ratio of the delta between the global average and the center luma, over the luma range in the 3x3 neighborhood. + let subPixelOffset1 = clamp(abs(lumaAverage - lumaCenter) / lumaRange, 0.0, 1.0); + let subPixelOffset2 = (-2.0 * subPixelOffset1 + 3.0) * subPixelOffset1 * subPixelOffset1; + // Compute a sub-pixel offset based on this delta. + let subPixelOffsetFinal = subPixelOffset2 * subPixelOffset2 * SUBPIXEL_QUALITY; + + // Pick the biggest of the two offsets. + finalOffset = max(finalOffset, subPixelOffsetFinal); + + // Compute the final UV coordinates. + var finalUv = texCoord; + if (isHorizontal) { + finalUv.y = finalUv.y + finalOffset * stepLength; + } else { + finalUv.x = finalUv.x + finalOffset * stepLength; + } + + // Read the color at the new UV coordinates, and use it. 
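+    // (The sub-pixel branch above is a smoothstep of the normalized luma delta,
+    // (3 - 2x) * x^2, squared and scaled by SUBPIXEL_QUALITY.)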
+ var finalColor = textureSampleLevel(screenTexture, samp, finalUv, 0.0).rgb; + return vec4(finalColor, centerSample.a); +} + +``` + +### bevy_shaders/view_transformations + +```rust +#define_import_path bevy_pbr::view_transformations + +#import bevy_pbr::mesh_view_bindings as view_bindings + +/// World space: +/// +y is up + +/// View space: +/// -z is forward, +x is right, +y is up +/// Forward is from the camera position into the scene. +/// (0.0, 0.0, -1.0) is linear distance of 1.0 in front of the camera's view relative to the camera's rotation +/// (0.0, 1.0, 0.0) is linear distance of 1.0 above the camera's view relative to the camera's rotation + +/// NDC (normalized device coordinate): +/// https://www.w3.org/TR/webgpu/#coordinate-systems +/// (-1.0, -1.0) in NDC is located at the bottom-left corner of NDC +/// (1.0, 1.0) in NDC is located at the top-right corner of NDC +/// Z is depth where: +/// 1.0 is near clipping plane +/// Perspective projection: 0.0 is inf far away +/// Orthographic projection: 0.0 is far clipping plane + +/// UV space: +/// 0.0, 0.0 is the top left +/// 1.0, 1.0 is the bottom right + + +// ----------------- +// TO WORLD -------- +// ----------------- + +/// Convert a view space position to world space +fn position_view_to_world(view_pos: vec3) -> vec3 { + let world_pos = view_bindings::view.world_from_view * vec4(view_pos, 1.0); + return world_pos.xyz; +} + +/// Convert a clip space position to world space +fn position_clip_to_world(clip_pos: vec4) -> vec3 { + let world_pos = view_bindings::view.world_from_clip * clip_pos; + return world_pos.xyz; +} + +/// Convert a ndc space position to world space +fn position_ndc_to_world(ndc_pos: vec3) -> vec3 { + let world_pos = view_bindings::view.world_from_clip * vec4(ndc_pos, 1.0); + return world_pos.xyz / world_pos.w; +} + +/// Convert a view space direction to world space +fn direction_view_to_world(view_dir: vec3) -> vec3 { + let world_dir = view_bindings::view.world_from_view * vec4(view_dir, 0.0); + return world_dir.xyz; +} + +/// Convert a clip space direction to world space +fn direction_clip_to_world(clip_dir: vec4) -> vec3 { + let world_dir = view_bindings::view.world_from_clip * clip_dir; + return world_dir.xyz; +} + +// ----------------- +// TO VIEW --------- +// ----------------- + +/// Convert a world space position to view space +fn position_world_to_view(world_pos: vec3) -> vec3 { + let view_pos = view_bindings::view.view_from_world * vec4(world_pos, 1.0); + return view_pos.xyz; +} + +/// Convert a clip space position to view space +fn position_clip_to_view(clip_pos: vec4) -> vec3 { + let view_pos = view_bindings::view.view_from_clip * clip_pos; + return view_pos.xyz; +} + +/// Convert a ndc space position to view space +fn position_ndc_to_view(ndc_pos: vec3) -> vec3 { + let view_pos = view_bindings::view.view_from_clip * vec4(ndc_pos, 1.0); + return view_pos.xyz / view_pos.w; +} + +/// Convert a world space direction to view space +fn direction_world_to_view(world_dir: vec3) -> vec3 { + let view_dir = view_bindings::view.view_from_world * vec4(world_dir, 0.0); + return view_dir.xyz; +} + +/// Convert a clip space direction to view space +fn direction_clip_to_view(clip_dir: vec4) -> vec3 { + let view_dir = view_bindings::view.view_from_clip * clip_dir; + return view_dir.xyz; +} + +// ----------------- +// TO CLIP --------- +// ----------------- + +/// Convert a world space position to clip space +fn position_world_to_clip(world_pos: vec3) -> vec4 { + let clip_pos = 
view_bindings::view.clip_from_world * vec4(world_pos, 1.0); + return clip_pos; +} + +/// Convert a view space position to clip space +fn position_view_to_clip(view_pos: vec3) -> vec4 { + let clip_pos = view_bindings::view.clip_from_view * vec4(view_pos, 1.0); + return clip_pos; +} + +/// Convert a world space direction to clip space +fn direction_world_to_clip(world_dir: vec3) -> vec4 { + let clip_dir = view_bindings::view.clip_from_world * vec4(world_dir, 0.0); + return clip_dir; +} + +/// Convert a view space direction to clip space +fn direction_view_to_clip(view_dir: vec3) -> vec4 { + let clip_dir = view_bindings::view.clip_from_view * vec4(view_dir, 0.0); + return clip_dir; +} + +// ----------------- +// TO NDC ---------- +// ----------------- + +/// Convert a world space position to ndc space +fn position_world_to_ndc(world_pos: vec3) -> vec3 { + let ndc_pos = view_bindings::view.clip_from_world * vec4(world_pos, 1.0); + return ndc_pos.xyz / ndc_pos.w; +} + +/// Convert a view space position to ndc space +fn position_view_to_ndc(view_pos: vec3) -> vec3 { + let ndc_pos = view_bindings::view.clip_from_view * vec4(view_pos, 1.0); + return ndc_pos.xyz / ndc_pos.w; +} + +// ----------------- +// DEPTH ----------- +// ----------------- + +/// Retrieve the perspective camera near clipping plane +fn perspective_camera_near() -> f32 { + return view_bindings::view.clip_from_view[3][2]; +} + +/// Convert ndc depth to linear view z. +/// Note: Depth values in front of the camera will be negative as -z is forward +fn depth_ndc_to_view_z(ndc_depth: f32) -> f32 { +#ifdef VIEW_PROJECTION_PERSPECTIVE + return -perspective_camera_near() / ndc_depth; +#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC + return -(view_bindings::view.clip_from_view[3][2] - ndc_depth) / view_bindings::view.clip_from_view[2][2]; +#else + let view_pos = view_bindings::view.view_from_clip * vec4(0.0, 0.0, ndc_depth, 1.0); + return view_pos.z / view_pos.w; +#endif +} + +/// Convert linear view z to ndc depth. +/// Note: View z input should be negative for values in front of the camera as -z is forward +fn view_z_to_depth_ndc(view_z: f32) -> f32 { +#ifdef VIEW_PROJECTION_PERSPECTIVE + return -perspective_camera_near() / view_z; +#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC + return view_bindings::view.clip_from_view[3][2] + view_z * view_bindings::view.clip_from_view[2][2]; +#else + let ndc_pos = view_bindings::view.clip_from_view * vec4(0.0, 0.0, view_z, 1.0); + return ndc_pos.z / ndc_pos.w; +#endif +} + +// ----------------- +// UV -------------- +// ----------------- + +/// Convert ndc space xy coordinate [-1.0 .. 1.0] to uv [0.0 .. 1.0] +fn ndc_to_uv(ndc: vec2) -> vec2 { + return ndc * vec2(0.5, -0.5) + vec2(0.5); +} + +/// Convert uv [0.0 .. 1.0] coordinate to ndc space xy [-1.0 .. 1.0] +fn uv_to_ndc(uv: vec2) -> vec2 { + return uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0); +} + +/// returns the (0.0, 0.0) .. (1.0, 1.0) position within the viewport for the current render target +/// [0 .. render target viewport size] eg. [(0.0, 0.0) .. (1280.0, 720.0)] to [(0.0, 0.0) .. (1.0, 1.0)] +fn frag_coord_to_uv(frag_coord: vec2) -> vec2 { + return (frag_coord - view_bindings::view.viewport.xy) / view_bindings::view.viewport.zw; +} + +/// Convert frag coord to ndc +fn frag_coord_to_ndc(frag_coord: vec4) -> vec3 { + return vec3(uv_to_ndc(frag_coord_to_uv(frag_coord.xy)), frag_coord.z); +} + +/// Convert ndc space xy coordinate [-1.0 .. 1.0] to [0 .. 
render target +/// viewport size] +fn ndc_to_frag_coord(ndc: vec2) -> vec2 { + return ndc_to_uv(ndc) * view_bindings::view.viewport.zw; +} + +``` + +### bevy_shaders/raymarch + +```rust +// Copyright (c) 2023 Tomasz Stachowiak +// +// This contribution is dual licensed under EITHER OF +// +// Apache License, Version 2.0, (http://www.apache.org/licenses/LICENSE-2.0) +// MIT license (http://opensource.org/licenses/MIT) +// +// at your option. +// +// This is a port of the original [`raymarch.hlsl`] to WGSL. It's deliberately +// kept as close as possible so that patches to the original `raymarch.hlsl` +// have the greatest chances of applying to this version. +// +// [`raymarch.hlsl`]: +// https://gist.github.com/h3r2tic/9c8356bdaefbe80b1a22ae0aaee192db + +#define_import_path bevy_pbr::raymarch + +#import bevy_pbr::mesh_view_bindings::depth_prepass_texture +#import bevy_pbr::view_transformations::{ + direction_world_to_clip, + ndc_to_uv, + perspective_camera_near, + position_world_to_ndc, +} + +// Allows us to sample from the depth buffer with bilinear filtering. +@group(1) @binding(2) var depth_linear_sampler: sampler; + +// Allows us to sample from the depth buffer with nearest-neighbor filtering. +@group(1) @binding(3) var depth_nearest_sampler: sampler; + +// Main code + +struct HybridRootFinder { + linear_steps: u32, + bisection_steps: u32, + use_secant: bool, + linear_march_exponent: f32, + + jitter: f32, + min_t: f32, + max_t: f32, +} + +fn hybrid_root_finder_new_with_linear_steps(v: u32) -> HybridRootFinder { + var res: HybridRootFinder; + res.linear_steps = v; + res.bisection_steps = 0u; + res.use_secant = false; + res.linear_march_exponent = 1.0; + res.jitter = 1.0; + res.min_t = 0.0; + res.max_t = 1.0; + return res; +} + +fn hybrid_root_finder_find_root( + root_finder: ptr, + start: vec3, + end: vec3, + distance_fn: ptr, + hit_t: ptr, + miss_t: ptr, + hit_d: ptr, +) -> bool { + let dir = end - start; + + var min_t = (*root_finder).min_t; + var max_t = (*root_finder).max_t; + + var min_d = DistanceWithPenetration(0.0, false, 0.0); + var max_d = DistanceWithPenetration(0.0, false, 0.0); + + let step_size = (max_t - min_t) / f32((*root_finder).linear_steps); + + var intersected = false; + + // + // Ray march using linear steps + + if ((*root_finder).linear_steps > 0u) { + let candidate_t = mix( + min_t, + max_t, + pow( + (*root_finder).jitter / f32((*root_finder).linear_steps), + (*root_finder).linear_march_exponent + ) + ); + + let candidate = start + dir * candidate_t; + let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate); + intersected = candidate_d.distance < 0.0 && candidate_d.valid; + + if (intersected) { + max_t = candidate_t; + max_d = candidate_d; + // The `[min_t .. max_t]` interval contains an intersection. End the linear search. + } else { + // No intersection yet. Carry on. + min_t = candidate_t; + min_d = candidate_d; + + for (var step = 1u; step < (*root_finder).linear_steps; step += 1u) { + let candidate_t = mix( + (*root_finder).min_t, + (*root_finder).max_t, + pow( + (f32(step) + (*root_finder).jitter) / f32((*root_finder).linear_steps), + (*root_finder).linear_march_exponent + ) + ); + + let candidate = start + dir * candidate_t; + let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate); + intersected = candidate_d.distance < 0.0 && candidate_d.valid; + + if (intersected) { + max_t = candidate_t; + max_d = candidate_d; + // The `[min_t .. max_t]` interval contains an intersection. + // End the linear search. 
+                    break;
+                } else {
+                    // No intersection yet. Carry on.
+                    min_t = candidate_t;
+                    min_d = candidate_d;
+                }
+            }
+        }
+    }
+
+    *miss_t = min_t;
+    *hit_t = min_t;
+
+    //
+    // Refine the hit using bisection
+
+    if (intersected) {
+        for (var step = 0u; step < (*root_finder).bisection_steps; step += 1u) {
+            let mid_t = (min_t + max_t) * 0.5;
+            let candidate = start + dir * mid_t;
+            let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate);
+
+            if (candidate_d.distance < 0.0 && candidate_d.valid) {
+                // Intersection at the mid point. Refine the first half.
+                max_t = mid_t;
+                max_d = candidate_d;
+            } else {
+                // No intersection yet at the mid point. Refine the second half.
+                min_t = mid_t;
+                min_d = candidate_d;
+            }
+        }
+
+        if ((*root_finder).use_secant) {
+            // Finish with one application of the secant method
+            let total_d = min_d.distance + -max_d.distance;
+
+            let mid_t = mix(min_t, max_t, min_d.distance / total_d);
+            let candidate = start + dir * mid_t;
+            let candidate_d = depth_raymarch_distance_fn_evaluate(distance_fn, candidate);
+
+            // Only accept the result of the secant method if it improves upon
+            // the previous result.
+            //
+            // Technically this should be `abs(candidate_d.distance) <
+            // min(min_d.distance, -max_d.distance) * frac`, but this seems
+            // sufficient.
+            if (abs(candidate_d.distance) < min_d.distance * 0.9 && candidate_d.valid) {
+                *hit_t = mid_t;
+                *hit_d = candidate_d;
+            } else {
+                *hit_t = max_t;
+                *hit_d = max_d;
+            }
+
+            return true;
+        } else {
+            *hit_t = max_t;
+            *hit_d = max_d;
+            return true;
+        }
+    } else {
+        // Mark the conservative miss distance.
+        *hit_t = min_t;
+        return false;
+    }
+}
+
+struct DistanceWithPenetration {
+    /// Distance to the surface whose root we're trying to find.
+    distance: f32,
+
+    /// Whether to consider this sample valid for intersection.
+    /// Mostly relevant for allowing the ray marcher to travel behind surfaces,
+    /// as it will mark surfaces it travels under as invalid.
+    valid: bool,
+
+    /// Conservative estimate of depth to which the ray penetrates the marched surface.
+    penetration: f32,
+}
+
+struct DepthRaymarchDistanceFn {
+    depth_tex_size: vec2<f32>,
+
+    march_behind_surfaces: bool,
+    depth_thickness: f32,
+
+    use_sloppy_march: bool,
+}
+
+fn depth_raymarch_distance_fn_evaluate(
+    distance_fn: ptr<function, DepthRaymarchDistanceFn>,
+    ray_point_cs: vec3<f32>,
+) -> DistanceWithPenetration {
+    let interp_uv = ndc_to_uv(ray_point_cs.xy);
+
+    let ray_depth = 1.0 / ray_point_cs.z;
+
+    // We're using both point-sampled and bilinear-filtered values from the depth buffer.
+    //
+    // That's really stupid but works like magic. For samples taken near the ray origin,
+    // the discrete nature of the depth buffer becomes a problem. It's not a land of continuous surfaces,
+    // but a bunch of stacked duplo bricks.
+    //
+    // Technically we should be taking discrete steps in this duplo land, but then we're at the mercy
+    // of arbitrary quantization of our directions -- and sometimes we'll take a step which would
+    // claim that the ray is occluded -- even though the underlying smooth surface wouldn't occlude it.
+    //
+    // If we instead take linear taps from the depth buffer, we reconstruct the linear surface.
+    // That fixes acne, but introduces false shadowing near object boundaries, as we now pretend
+    // that everything is shrink-wrapped by a continuous 2.5D surface, and our depth thickness
+    // heuristic ends up falling apart.
+    //
+    // The fix is to consider both the smooth and the discrete surfaces, and only claim occlusion
+    // when the ray descends below both.
+    //
+    // The two approaches end up fixing each other's artifacts:
+    // * The false occlusions due to duplo land are rejected because the ray stays above the smooth surface.
+    // * The shrink-wrap surface is no longer continuous, so it's possible for rays to miss it.
+
+    let linear_depth =
+        1.0 / textureSampleLevel(depth_prepass_texture, depth_linear_sampler, interp_uv, 0.0);
+    let unfiltered_depth =
+        1.0 / textureSampleLevel(depth_prepass_texture, depth_nearest_sampler, interp_uv, 0.0);
+
+    var max_depth: f32;
+    var min_depth: f32;
+
+    if ((*distance_fn).use_sloppy_march) {
+        max_depth = unfiltered_depth;
+        min_depth = unfiltered_depth;
+    } else {
+        max_depth = max(linear_depth, unfiltered_depth);
+        min_depth = min(linear_depth, unfiltered_depth);
+    }
+
+    let bias = 0.000002;
+
+    var res: DistanceWithPenetration;
+    res.distance = max_depth * (1.0 + bias) - ray_depth;
+
+    // This will be used at the end of the ray march to potentially discard the hit.
+    res.penetration = ray_depth - min_depth;
+
+    if ((*distance_fn).march_behind_surfaces) {
+        res.valid = res.penetration < (*distance_fn).depth_thickness;
+    } else {
+        res.valid = true;
+    }
+
+    return res;
+}
+
+struct DepthRayMarchResult {
+    /// True if the raymarch hit something.
+    hit: bool,
+
+    /// In case of a hit, the normalized distance to it.
+    ///
+    /// In case of a miss, the furthest the ray managed to travel, which could either be
+    /// exceeding the max range, or getting behind a surface further than the depth thickness.
+    ///
+    /// Range: `0..=1` as a lerp factor over `ray_start_cs..=ray_end_cs`.
+    hit_t: f32,
+
+    /// UV corresponding to `hit_t`.
+    hit_uv: vec2<f32>,
+
+    /// The distance that the hit point penetrates into the hit surface.
+    /// Will normally be non-zero due to limited precision of the ray march.
+    ///
+    /// In case of a miss: undefined.
+    hit_penetration: f32,
+
+    /// Ditto, within the range `0..DepthRayMarch::depth_thickness_linear_z`.
+    ///
+    /// In case of a miss: undefined.
+    hit_penetration_frac: f32,
+}
+
+struct DepthRayMarch {
+    /// Number of steps to be taken at regular intervals to find an initial intersection.
+    /// Must not be zero.
+    linear_steps: u32,
+
+    /// Exponent to be applied in the linear part of the march.
+    ///
+    /// A value of 1.0 will result in equidistant steps, and higher values will compress
+    /// the earlier steps, and expand the later ones. This might be desirable in order
+    /// to get more detail close to objects in SSR or SSGI.
+    ///
+    /// For optimal performance, this should be a small compile-time unsigned integer,
+    /// such as 1 or 2.
+    linear_march_exponent: f32,
+
+    /// Number of steps in a bisection (binary search) to perform once the linear search
+    /// has found an intersection. Helps narrow down the hit, increasing the chance of
+    /// the secant method finding an accurate hit point.
+    ///
+    /// Useful when sampling color, e.g. SSR or SSGI, but pointless for contact shadows.
+    bisection_steps: u32,
+
+    /// Approximate the root position using the secant method -- by solving for line-line
+    /// intersection between the ray approach rate and the surface gradient.
+    ///
+    /// Useful when sampling color, e.g. SSR or SSGI, but pointless for contact shadows.
+    use_secant: bool,
+
+    /// Jitter to apply to the first step of the linear search; 0..=1 range, mapping
+    /// to the extent of a single linear step in the first phase of the search.
+ /// Use 1.0 if you don't want jitter. + jitter: f32, + + /// Clip space coordinates (w=1) of the ray. + ray_start_cs: vec3, + ray_end_cs: vec3, + + /// Should be used for contact shadows, but not for any color bounce, e.g. SSR. + /// + /// For SSR etc. this can easily create leaks, but with contact shadows it allows the rays + /// to pass over invalid occlusions (due to thickness), and find potentially valid ones ahead. + /// + /// Note that this will cause the linear search to potentially miss surfaces, + /// because when the ray overshoots and ends up penetrating a surface further than + /// `depth_thickness_linear_z`, the ray marcher will just carry on. + /// + /// For this reason, this may require a lot of samples, or high depth thickness, + /// so that `depth_thickness_linear_z >= world space ray length / linear_steps`. + march_behind_surfaces: bool, + + /// If `true`, the ray marcher only performs nearest lookups of the depth buffer, + /// resulting in aliasing and false occlusion when marching tiny detail. + /// It should work fine for longer traces with fewer rays though. + use_sloppy_march: bool, + + /// When marching the depth buffer, we only have 2.5D information, and don't know how + /// thick surfaces are. We shall assume that the depth buffer fragments are little squares + /// with a constant thickness defined by this parameter. + depth_thickness_linear_z: f32, + + /// Size of the depth buffer we're marching in, in pixels. + depth_tex_size: vec2, +} + +fn depth_ray_march_new_from_depth(depth_tex_size: vec2) -> DepthRayMarch { + var res: DepthRayMarch; + res.jitter = 1.0; + res.linear_steps = 4u; + res.bisection_steps = 0u; + res.linear_march_exponent = 1.0; + res.depth_tex_size = depth_tex_size; + res.depth_thickness_linear_z = 1.0; + res.march_behind_surfaces = false; + res.use_sloppy_march = false; + return res; +} + +fn depth_ray_march_to_cs_dir_impl( + raymarch: ptr, + dir_cs: vec4, + infinite: bool, +) { + var end_cs = vec4((*raymarch).ray_start_cs, 1.0) + dir_cs; + + // Perform perspective division, but avoid dividing by zero for rays + // heading directly towards the eye. + end_cs /= select(-1.0, 1.0, end_cs.w >= 0.0) * max(1e-10, abs(end_cs.w)); + + // Clip ray start to the view frustum + var delta_cs = end_cs.xyz - (*raymarch).ray_start_cs; + let near_edge = select(vec3(-1.0, -1.0, 0.0), vec3(1.0, 1.0, 1.0), delta_cs < vec3(0.0)); + let dist_to_near_edge = (near_edge - (*raymarch).ray_start_cs) / delta_cs; + let max_dist_to_near_edge = max(dist_to_near_edge.x, dist_to_near_edge.y); + (*raymarch).ray_start_cs += delta_cs * max(0.0, max_dist_to_near_edge); + + // Clip ray end to the view frustum + + delta_cs = end_cs.xyz - (*raymarch).ray_start_cs; + let far_edge = select(vec3(-1.0, -1.0, 0.0), vec3(1.0, 1.0, 1.0), delta_cs >= vec3(0.0)); + let dist_to_far_edge = (far_edge - (*raymarch).ray_start_cs) / delta_cs; + let min_dist_to_far_edge = min( + min(dist_to_far_edge.x, dist_to_far_edge.y), + dist_to_far_edge.z + ); + + if (infinite) { + delta_cs *= min_dist_to_far_edge; + } else { + // If unbounded, would make the ray reach the end of the frustum + delta_cs *= min(1.0, min_dist_to_far_edge); + } + + (*raymarch).ray_end_cs = (*raymarch).ray_start_cs + delta_cs; +} + +/// March from a clip-space position (w = 1) +fn depth_ray_march_from_cs(raymarch: ptr, v: vec3) { + (*raymarch).ray_start_cs = v; +} + +/// March to a clip-space position (w = 1) +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. 
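+///
+/// Illustrative call sequence (the variable names below are examples, not part of this file):
+///
+///     var rm = depth_ray_march_new_from_depth(depth_tex_size);
+///     depth_ray_march_from_cs(&rm, ray_start_cs);
+///     depth_ray_march_to_cs(&rm, ray_end_cs);
+///     let result = depth_ray_march_march(&rm);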
+fn depth_ray_march_to_cs(raymarch: ptr, end_cs: vec3) { + let dir = vec4(end_cs - (*raymarch).ray_start_cs, 0.0) * sign(end_cs.z); + depth_ray_march_to_cs_dir_impl(raymarch, dir, false); +} + +/// March towards a clip-space direction. Infinite (ray is extended to cover the whole view frustum). +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. +fn depth_ray_march_to_cs_dir(raymarch: ptr, dir: vec4) { + depth_ray_march_to_cs_dir_impl(raymarch, dir, true); +} + +/// March to a world-space position. +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. +fn depth_ray_march_to_ws(raymarch: ptr, end: vec3) { + depth_ray_march_to_cs(raymarch, position_world_to_ndc(end)); +} + +/// March towards a world-space direction. Infinite (ray is extended to cover the whole view frustum). +/// +/// Must be called after `from_cs`, as it will clip the world-space ray to the view frustum. +fn depth_ray_march_to_ws_dir(raymarch: ptr, dir: vec3) { + depth_ray_march_to_cs_dir_impl(raymarch, direction_world_to_clip(dir), true); +} + +/// Perform the ray march. +fn depth_ray_march_march(raymarch: ptr) -> DepthRayMarchResult { + var res = DepthRayMarchResult(false, 0.0, vec2(0.0), 0.0, 0.0); + + let ray_start_uv = ndc_to_uv((*raymarch).ray_start_cs.xy); + let ray_end_uv = ndc_to_uv((*raymarch).ray_end_cs.xy); + + let ray_uv_delta = ray_end_uv - ray_start_uv; + let ray_len_px = ray_uv_delta * (*raymarch).depth_tex_size; + + let min_px_per_step = 1u; + let step_count = max( + 2, + min(i32((*raymarch).linear_steps), i32(floor(length(ray_len_px) / f32(min_px_per_step)))) + ); + + let linear_z_to_scaled_linear_z = 1.0 / perspective_camera_near(); + let depth_thickness = (*raymarch).depth_thickness_linear_z * linear_z_to_scaled_linear_z; + + var distance_fn: DepthRaymarchDistanceFn; + distance_fn.depth_tex_size = (*raymarch).depth_tex_size; + distance_fn.march_behind_surfaces = (*raymarch).march_behind_surfaces; + distance_fn.depth_thickness = depth_thickness; + distance_fn.use_sloppy_march = (*raymarch).use_sloppy_march; + + var hit: DistanceWithPenetration; + + var hit_t = 0.0; + var miss_t = 0.0; + var root_finder = hybrid_root_finder_new_with_linear_steps(u32(step_count)); + root_finder.bisection_steps = (*raymarch).bisection_steps; + root_finder.use_secant = (*raymarch).use_secant; + root_finder.linear_march_exponent = (*raymarch).linear_march_exponent; + root_finder.jitter = (*raymarch).jitter; + let intersected = hybrid_root_finder_find_root( + &root_finder, + (*raymarch).ray_start_cs, + (*raymarch).ray_end_cs, + &distance_fn, + &hit_t, + &miss_t, + &hit + ); + + res.hit_t = hit_t; + + if (intersected && hit.penetration < depth_thickness && hit.distance < depth_thickness) { + res.hit = true; + res.hit_uv = mix(ray_start_uv, ray_end_uv, res.hit_t); + res.hit_penetration = hit.penetration / linear_z_to_scaled_linear_z; + res.hit_penetration_frac = hit.penetration / depth_thickness; + return res; + } + + res.hit_t = miss_t; + res.hit_uv = mix(ray_start_uv, ray_end_uv, res.hit_t); + + return res; +} + +``` + +### bevy_shaders/motion_blur + +```rust +#import bevy_pbr::prepass_utils +#import bevy_pbr::utils +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_render::globals::Globals + +#ifdef MULTISAMPLED +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var motion_vectors: texture_multisampled_2d; +@group(0) @binding(2) var depth: texture_depth_multisampled_2d; 
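+// Multisampled textures cannot be read through a sampler; the fragment shader below
+// fetches them per-sample with `textureLoad`, hence the MULTISAMPLED branches.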
+#else +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var motion_vectors: texture_2d; +@group(0) @binding(2) var depth: texture_depth_2d; +#endif +@group(0) @binding(3) var texture_sampler: sampler; +struct MotionBlur { + shutter_angle: f32, + samples: u32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: vec2 +#endif +} +@group(0) @binding(4) var settings: MotionBlur; +@group(0) @binding(5) var globals: Globals; + +@fragment +fn fragment( + #ifdef MULTISAMPLED + @builtin(sample_index) sample_index: u32, + #endif + in: FullscreenVertexOutput +) -> @location(0) vec4 { + let texture_size = vec2(textureDimensions(screen_texture)); + let frag_coords = vec2(in.uv * texture_size); + +#ifdef MULTISAMPLED + let base_color = textureLoad(screen_texture, frag_coords, i32(sample_index)); +#else + let base_color = textureSample(screen_texture, texture_sampler, in.uv); +#endif + + let shutter_angle = settings.shutter_angle; + +#ifdef MULTISAMPLED + let this_motion_vector = textureLoad(motion_vectors, frag_coords, i32(sample_index)).rg; +#else + let this_motion_vector = textureSample(motion_vectors, texture_sampler, in.uv).rg; +#endif + +#ifdef NO_DEPTH_TEXTURE_SUPPORT + let this_depth = 0.0; + let depth_supported = false; +#else + let depth_supported = true; +#ifdef MULTISAMPLED + let this_depth = textureLoad(depth, frag_coords, i32(sample_index)); +#else + let this_depth = textureSample(depth, texture_sampler, in.uv); +#endif +#endif + + // The exposure vector is the distance that this fragment moved while the camera shutter was + // open. This is the motion vector (total distance traveled) multiplied by the shutter angle (a + // fraction). In film, the shutter angle is commonly 0.5 or "180 degrees" (out of 360 total). + // This means that for a frame time of 20ms, the shutter is only open for 10ms. + // + // Using a shutter angle larger than 1.0 is non-physical, objects would need to move further + // than they physically travelled during a frame, which is not possible. Note: we allow values + // larger than 1.0 because it may be desired for artistic reasons. + let exposure_vector = shutter_angle * this_motion_vector; + + var accumulator: vec4; + var weight_total = 0.0; + let n_samples = i32(settings.samples); + let noise = utils::interleaved_gradient_noise(vec2(frag_coords), globals.frame_count); // 0 to 1 + + for (var i = -n_samples; i < n_samples; i++) { + // The current sample step vector, from in.uv + let step_vector = 0.5 * exposure_vector * (f32(i) + noise) / f32(n_samples); + var sample_uv = in.uv + step_vector; + + // If the sample is off screen, skip it. 
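+        // Note that `continue` also skips the weight accumulation below, so off-screen
+        // samples simply reduce the effective sample count.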
+ if sample_uv.x < 0.0 || sample_uv.x > 1.0 || sample_uv.y < 0.0 || sample_uv.y > 1.0 { + continue; + } + + let sample_coords = vec2(sample_uv * texture_size); + + #ifdef MULTISAMPLED + let sample_color = textureLoad(screen_texture, sample_coords, i32(sample_index)); + #else + let sample_color = textureSample(screen_texture, texture_sampler, sample_uv); + #endif + #ifdef MULTISAMPLED + let sample_motion = textureLoad(motion_vectors, sample_coords, i32(sample_index)).rg; + #else + let sample_motion = textureSample(motion_vectors, texture_sampler, sample_uv).rg; + #endif + #ifdef NO_DEPTH_TEXTURE_SUPPORT + let sample_depth = 0.0; + #else + #ifdef MULTISAMPLED + let sample_depth = textureLoad(depth, sample_coords, i32(sample_index)); + #else + let sample_depth = textureSample(depth, texture_sampler, sample_uv); + #endif + #endif + + var weight = 1.0; + let is_sample_in_fg = !(depth_supported && sample_depth < this_depth && sample_depth > 0.0); + // If the depth is 0.0, this fragment has no depth written to it and we assume it is in the + // background. This ensures that things like skyboxes, which do not write to depth, are + // correctly sampled in motion blur. + if sample_depth != 0.0 && is_sample_in_fg { + // The following weight calculation is used to eliminate ghosting artifacts that are + // common in motion-vector-based motion blur implementations. While some resources + // recommend using depth, I've found that sampling the velocity results in significantly + // better results. Unlike a depth heuristic, this is not scale dependent. + // + // The most distracting artifacts occur when a stationary foreground object is + // incorrectly sampled while blurring a moving background object, causing the stationary + // object to blur when it should be sharp ("background bleeding"). This is most obvious + // when the camera is tracking a fast moving object. The tracked object should be sharp, + // and should not bleed into the motion blurred background. + // + // To attenuate these incorrect samples, we compare the motion of the fragment being + // blurred to the UV being sampled, to answer the question "is it possible that this + // sample was occluding the fragment?" + // + // Note to future maintainers: proceed with caution when making any changes here, and + // ensure you check all occlusion/disocclusion scenarios and fullscreen camera rotation + // blur for regressions. + let frag_speed = length(step_vector); + let sample_speed = length(sample_motion) / 2.0; // Halved because the sample is centered + let cos_angle = dot(step_vector, sample_motion) / (frag_speed * sample_speed * 2.0); + let motion_similarity = clamp(abs(cos_angle), 0.0, 1.0); + if sample_speed * motion_similarity < frag_speed { + // Project the sample's motion onto the frag's motion vector. If the sample did not + // cover enough distance to reach the original frag, there is no way it could have + // influenced this frag at all, and should be discarded. + weight = 0.0; + } + } + weight_total += weight; + accumulator += weight * sample_color; + } + + let has_moved_less_than_a_pixel = + dot(this_motion_vector * texture_size, this_motion_vector * texture_size) < 1.0; + // In case no samples were accepted, fall back to base color. + // We also fall back if motion is small, to not break antialiasing. 
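+    // (Without this fallback, `accumulator / weight_total` would divide by zero whenever
+    // every sample was rejected or skipped.)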
+ if weight_total <= 0.0 || has_moved_less_than_a_pixel { + accumulator = base_color; + weight_total = 1.0; + } + return accumulator / weight_total; +} +``` + +### bevy_shaders/ui_material + +```rust +#import bevy_render::{ + view::View, + globals::Globals, +} +#import bevy_ui::ui_vertex_output::UiVertexOutput + +@group(0) @binding(0) +var view: View; +@group(0) @binding(1) +var globals: Globals; + +@vertex +fn vertex( + @location(0) vertex_position: vec3, + @location(1) vertex_uv: vec2, + @location(2) size: vec2, + @location(3) border_widths: vec4, +) -> UiVertexOutput { + var out: UiVertexOutput; + out.uv = vertex_uv; + out.position = view.clip_from_world * vec4(vertex_position, 1.0); + out.size = size; + out.border_widths = border_widths; + return out; +} + +@fragment +fn fragment(in: UiVertexOutput) -> @location(0) vec4 { + return vec4(1.0); +} + +``` + +### bevy_shaders/cubemap_unlit + +```rust +#import bevy_pbr::forward_io::VertexOutput + +#ifdef CUBEMAP_ARRAY +@group(2) @binding(0) var base_color_texture: texture_cube_array; +#else +@group(2) @binding(0) var base_color_texture: texture_cube; +#endif + +@group(2) @binding(1) var base_color_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + let fragment_position_view_lh = mesh.world_position.xyz * vec3(1.0, 1.0, -1.0); + return textureSample( + base_color_texture, + base_color_sampler, + fragment_position_view_lh + ); +} + +``` + +### bevy_shaders/mesh_functions + +```rust +#define_import_path bevy_pbr::mesh_functions + +#import bevy_pbr::{ + mesh_view_bindings::{ + view, + visibility_ranges, + VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE + }, + mesh_bindings::mesh, + mesh_types::MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT, + view_transformations::position_world_to_clip, +} +#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} + + +fn get_world_from_local(instance_index: u32) -> mat4x4 { + return affine3_to_square(mesh[instance_index].world_from_local); +} + +fn get_previous_world_from_local(instance_index: u32) -> mat4x4 { + return affine3_to_square(mesh[instance_index].previous_world_from_local); +} + +fn mesh_position_local_to_world(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + return world_from_local * vertex_position; +} + +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. +fn mesh_position_local_to_clip(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh_position_local_to_world(world_from_local, vertex_position); + return position_world_to_clip(world_position.xyz); +} + +fn mesh_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { + // NOTE: The mikktspace method of normal mapping requires that the world normal is + // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents + // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. + // We only skip normalization for invalid normals so that they don't become NaN. + // Do not change this code unless you really know what you are doing. 
+ // http://www.mikktspace.com/ + if any(vertex_normal != vec3(0.0)) { + return normalize( + mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].local_from_world_transpose_a, + mesh[instance_index].local_from_world_transpose_b, + ) * vertex_normal + ); + } else { + return vertex_normal; + } +} + +// Calculates the sign of the determinant of the 3x3 model matrix based on a +// mesh flag +fn sign_determinant_model_3x3m(mesh_flags: u32) -> f32 { + // bool(u32) is false if 0u else true + // f32(bool) is 1.0 if true else 0.0 + // * 2.0 - 1.0 remaps 0.0 or 1.0 to -1.0 or 1.0 respectively + return f32(bool(mesh_flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0; +} + +fn mesh_tangent_local_to_world(world_from_local: mat4x4, vertex_tangent: vec4, instance_index: u32) -> vec4 { + // NOTE: The mikktspace method of normal mapping requires that the world tangent is + // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents + // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, + // Unreal Engine, Godot, and more all use the mikktspace method. + // We only skip normalization for invalid tangents so that they don't become NaN. + // Do not change this code unless you really know what you are doing. + // http://www.mikktspace.com/ + if any(vertex_tangent != vec4(0.0)) { + return vec4( + normalize( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz, + ) * vertex_tangent.xyz + ), + // NOTE: Multiplying by the sign of the determinant of the 3x3 model matrix accounts for + // situations such as negative scaling. + vertex_tangent.w * sign_determinant_model_3x3m(mesh[instance_index].flags) + ); + } else { + return vertex_tangent; + } +} + +// Returns an appropriate dither level for the current mesh instance. +// +// This looks up the LOD range in the `visibility_ranges` table and compares the +// camera distance to determine the dithering level. +#ifdef VISIBILITY_RANGE_DITHER +fn get_visibility_range_dither_level(instance_index: u32, world_position: vec4) -> i32 { +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 + // If we're using a storage buffer, then the length is variable. 
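+    // (`arrayLength` reports how many elements are bound in the runtime-sized array.)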
+ let visibility_buffer_array_len = arrayLength(&visibility_ranges); +#else // AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 + // If we're using a uniform buffer, then the length is constant + let visibility_buffer_array_len = VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE; +#endif // AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 + + let visibility_buffer_index = mesh[instance_index].flags & 0xffffu; + if (visibility_buffer_index > visibility_buffer_array_len) { + return -16; + } + + let lod_range = visibility_ranges[visibility_buffer_index]; + let camera_distance = length(view.world_position.xyz - world_position.xyz); + + // This encodes the following mapping: + // + // `lod_range.` x y z w camera distance + // ←───────┼────────┼────────┼────────┼────────→ + // LOD level -16 -16 0 0 16 16 LOD level + let offset = select(-16, 0, camera_distance >= lod_range.z); + let bounds = select(lod_range.xy, lod_range.zw, camera_distance >= lod_range.z); + let level = i32(round((camera_distance - bounds.x) / (bounds.y - bounds.x) * 16.0)); + return offset + clamp(level, 0, 16); +} +#endif + +``` + +### bevy_shaders/lightmap + +```rust +#define_import_path bevy_pbr::lightmap + +#import bevy_pbr::mesh_bindings::mesh + +@group(1) @binding(4) var lightmaps_texture: texture_2d; +@group(1) @binding(5) var lightmaps_sampler: sampler; + +// Samples the lightmap, if any, and returns indirect illumination from it. +fn lightmap(uv: vec2, exposure: f32, instance_index: u32) -> vec3 { + let packed_uv_rect = mesh[instance_index].lightmap_uv_rect; + let uv_rect = vec4(vec4( + packed_uv_rect.x & 0xffffu, + packed_uv_rect.x >> 16u, + packed_uv_rect.y & 0xffffu, + packed_uv_rect.y >> 16u)) / 65535.0; + + let lightmap_uv = mix(uv_rect.xy, uv_rect.zw, uv); + + // Mipmapping lightmaps is usually a bad idea due to leaking across UV + // islands, so there's no harm in using mip level 0 and it lets us avoid + // control flow uniformity problems. + // + // TODO(pcwalton): Consider bicubic filtering. + return textureSampleLevel( + lightmaps_texture, + lightmaps_sampler, + lightmap_uv, + 0.0).rgb * exposure; +} + +``` + +### bevy_shaders/custom_material + +```rust +#import bevy_pbr::forward_io::VertexOutput +// we can import items from shader modules in the assets folder with a quoted path +#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER + +@group(2) @binding(0) var material_color: vec4; +@group(2) @binding(1) var material_color_texture: texture_2d; +@group(2) @binding(2) var material_color_sampler: sampler; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return material_color * textureSample(material_color_texture, material_color_sampler, mesh.uv) * COLOR_MULTIPLIER; +} + +``` + +### bevy_shaders/line_joints + +```rust +#import bevy_render::view::View + +@group(0) @binding(0) var view: View; + + +struct LineGizmoUniform { + line_width: f32, + depth_bias: f32, + resolution: u32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. 
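+    // (line_width, depth_bias and resolution occupy 12 bytes; this extra f32 rounds the
+    // struct up to 16.)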
+ _padding: f32, +#endif +} + +@group(1) @binding(0) var joints_gizmo: LineGizmoUniform; + +struct VertexInput { + @location(0) position_a: vec3, + @location(1) position_b: vec3, + @location(2) position_c: vec3, + @location(3) color: vec4, + @builtin(vertex_index) index: u32, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, +}; + +const EPSILON: f32 = 4.88e-04; + +@vertex +fn vertex_bevel(vertex: VertexInput) -> VertexOutput { + var positions = array, 3>( + vec2(0, 0), + vec2(0, 0.5), + vec2(0.5, 0), + ); + var position = positions[vertex.index]; + + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_c); + clip_b = clip_near_plane(clip_b, clip_a); + clip_c = clip_near_plane(clip_c, clip_b); + clip_a = clip_near_plane(clip_a, clip_c); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); + + var color = vertex.color; + var line_width = joints_gizmo.line_width; + +#ifdef PERSPECTIVE + line_width /= clip_b.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let ab = normalize(screen_b - screen_a); + let cb = normalize(screen_b - screen_c); + let ab_norm = vec2(-ab.y, ab.x); + let cb_norm = vec2(cb.y, -cb.x); + let tangent = normalize(ab - cb); + let normal = vec2(-tangent.y, tangent.x); + let sigma = sign(dot(ab + cb, normal)); + + var p0 = line_width * sigma * ab_norm; + var p1 = line_width * sigma * cb_norm; + + let screen = screen_b + position.x * p0 + position.y * p1; + + let depth = depth(clip_b); + + var clip_position = vec4(clip_b.w * ((2. * screen) / resolution - 1.), depth, clip_b.w); + return VertexOutput(clip_position, color); +} + +@vertex +fn vertex_miter(vertex: VertexInput) -> VertexOutput { + var positions = array, 6>( + vec3(0, 0, 0), + vec3(0.5, 0, 0), + vec3(0, 0.5, 0), + vec3(0, 0, 0), + vec3(0, 0.5, 0), + vec3(0, 0, 0.5), + ); + var position = positions[vertex.index]; + + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_c); + clip_b = clip_near_plane(clip_b, clip_a); + clip_c = clip_near_plane(clip_c, clip_b); + clip_a = clip_near_plane(clip_a, clip_c); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); + + var color = vertex.color; + var line_width = joints_gizmo.line_width; + +#ifdef PERSPECTIVE + line_width /= clip_b.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. 
{ + color.a *= line_width; + line_width = 1.; + } + + let ab = normalize(screen_b - screen_a); + let cb = normalize(screen_b - screen_c); + let ab_norm = vec2(-ab.y, ab.x); + let cb_norm = vec2(cb.y, -cb.x); + let tangent = normalize(ab - cb); + let normal = vec2(-tangent.y, tangent.x); + let sigma = sign(dot(ab + cb, normal)); + + var p0 = line_width * sigma * ab_norm; + var p1 = line_width * sigma * normal / dot(normal, ab_norm); + var p2 = line_width * sigma * cb_norm; + + var screen = screen_b + position.x * p0 + position.y * p1 + position.z * p2; + + var depth = depth(clip_b); + + var clip_position = vec4(clip_b.w * ((2. * screen) / resolution - 1.), depth, clip_b.w); + return VertexOutput(clip_position, color); +} + +@vertex +fn vertex_round(vertex: VertexInput) -> VertexOutput { + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + var clip_c = view.clip_from_world * vec4(vertex.position_c, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. + clip_a = clip_near_plane(clip_a, clip_c); + clip_b = clip_near_plane(clip_b, clip_a); + clip_c = clip_near_plane(clip_c, clip_b); + clip_a = clip_near_plane(clip_a, clip_c); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + let screen_c = resolution * (0.5 * clip_c.xy / clip_c.w + 0.5); + + var color = vertex.color; + var line_width = joints_gizmo.line_width; + +#ifdef PERSPECTIVE + line_width /= clip_b.w; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let ab = normalize(screen_b - screen_a); + let cb = normalize(screen_b - screen_c); + let ab_norm = vec2(-ab.y, ab.x); + let cb_norm = vec2(cb.y, -cb.x); + + // We render `joints_gizmo.resolution`triangles. The vertices in each triangle are ordered as follows: + // - 0: The 'center' vertex at `screen_b`. + // - 1: The vertex closer to the ab line. + // - 2: The vertex closer to the cb line. + var in_triangle_index = f32(vertex.index) % 3.0; + var tri_index = floor(f32(vertex.index) / 3.0); + var radius = sign(in_triangle_index) * 0.5 * line_width; + var theta = acos(dot(ab_norm, cb_norm)); + let sigma = sign(dot(ab_norm, cb)); + var angle = theta * (tri_index + in_triangle_index - 1) / f32(joints_gizmo.resolution); + var position_x = sigma * radius * cos(angle); + var position_y = radius * sin(angle); + + var screen = screen_b + position_x * ab_norm + position_y * ab; + + var depth = depth(clip_b); + + var clip_position = vec4(clip_b.w * ((2. * screen) / resolution - 1.), depth, clip_b.w); + return VertexOutput(clip_position, color); +} + +fn clip_near_plane(a: vec4, b: vec4) -> vec4 { + // Move a if a is behind the near plane and b is in front. + if a.z > a.w && b.z <= b.w { + // Interpolate a towards b until it's at the near plane. + let distance_a = a.z - a.w; + let distance_b = b.z - b.w; + // Add an epsilon to the interpolator to ensure that the point is + // not just behind the clip plane due to floating-point imprecision. + let t = distance_a / (distance_a - distance_b) + EPSILON; + return mix(a, b, t); + } + return a; +} + +fn depth(clip: vec4) -> f32 { + var depth: f32; + if joints_gizmo.depth_bias >= 0. { + depth = clip.z * (1. 
- joints_gizmo.depth_bias); + } else { + // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w + // and when equal to 0.0, it is exactly equal to depth. + // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 + // clip.w represents the near plane in homogeneous clip space in bevy, having a depth + // of this value means nothing can be in front of this + // The reason this uses an exponential function is that it makes it much easier for the + // user to chose a value that is convenient for them + depth = clip.z * exp2(-joints_gizmo.depth_bias * log2(clip.w / clip.z - EPSILON)); + } + return depth; +} + +struct FragmentInput { + @location(0) color: vec4, +}; + +struct FragmentOutput { + @location(0) color: vec4, +}; + +@fragment +fn fragment(in: FragmentInput) -> FragmentOutput { + // return FragmentOutput(vec4(1, 1, 1, 1)); + return FragmentOutput(in.color); +} + +``` + +### bevy_shaders/irradiance_volume + +```rust +#define_import_path bevy_pbr::irradiance_volume + +#import bevy_pbr::light_probe::query_light_probe +#import bevy_pbr::mesh_view_bindings::{ + irradiance_volumes, + irradiance_volume, + irradiance_volume_sampler, + light_probes, +}; + +#ifdef IRRADIANCE_VOLUMES_ARE_USABLE + +// See: +// https://advances.realtimerendering.com/s2006/Mitchell-ShadingInValvesSourceEngine.pdf +// Slide 28, "Ambient Cube Basis" +fn irradiance_volume_light(world_position: vec3, N: vec3) -> vec3 { + // Search for an irradiance volume that contains the fragment. + let query_result = query_light_probe(world_position, /*is_irradiance_volume=*/ true); + + // If there was no irradiance volume found, bail out. + if (query_result.texture_index < 0) { + return vec3(0.0f); + } + +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY + let irradiance_volume_texture = irradiance_volumes[query_result.texture_index]; +#else + let irradiance_volume_texture = irradiance_volume; +#endif + + let atlas_resolution = vec3(textureDimensions(irradiance_volume_texture)); + let resolution = vec3(textureDimensions(irradiance_volume_texture) / vec3(1u, 2u, 3u)); + + // Make sure to clamp to the edges to avoid texture bleed. + var unit_pos = (query_result.light_from_world * vec4(world_position, 1.0f)).xyz; + let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f)); + let uvw = stp / atlas_resolution; + + // The bottom half of each cube slice is the negative part, so choose it if applicable on each + // slice. + let neg_offset = select(vec3(0.0f), vec3(0.5f), N < vec3(0.0f)); + + let uvw_x = uvw + vec3(0.0f, neg_offset.x, 0.0f); + let uvw_y = uvw + vec3(0.0f, neg_offset.y, 1.0f / 3.0f); + let uvw_z = uvw + vec3(0.0f, neg_offset.z, 2.0f / 3.0f); + + let rgb_x = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_x, 0.0).rgb; + let rgb_y = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_y, 0.0).rgb; + let rgb_z = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_z, 0.0).rgb; + + // Use Valve's formula to sample. 
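+    // irradiance = rgb_x * N.x^2 + rgb_y * N.y^2 + rgb_z * N.z^2, where each rgb_* was sampled
+    // above from either the positive or the negative face of the ambient cube.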
+ let NN = N * N; + return (rgb_x * NN.x + rgb_y * NN.y + rgb_z * NN.z) * query_result.intensity; +} + +#endif // IRRADIANCE_VOLUMES_ARE_USABLE + +``` + +### bevy_shaders/mesh_vertex_output + +```rust +#define_import_path bevy_pbr::mesh_vertex_output + +struct MeshVertexOutput { + // this is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, + @location(0) world_position: vec4, + @location(1) world_normal: vec3, + #ifdef VERTEX_UVS + @location(2) uv: vec2, + #endif + #ifdef VERTEX_TANGENTS + @location(3) world_tangent: vec4, + #endif + #ifdef VERTEX_COLORS + @location(4) color: vec4, + #endif + #ifdef VERTEX_OUTPUT_INSTANCE_INDEX + @location(5) @interpolate(flat) instance_index: u32, + #endif +} + +``` + +### bevy_shaders/skybox_prepass + +```rust +#import bevy_render::view::View +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_pbr::view_transformations::uv_to_ndc + +struct PreviousViewUniforms { + view_from_world: mat4x4, + clip_from_world: mat4x4, +} + +@group(0) @binding(0) var view: View; +@group(0) @binding(1) var previous_view: PreviousViewUniforms; + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(1) vec4 { + let clip_pos = uv_to_ndc(in.uv); // Convert from uv to clip space + let world_pos = view.world_from_clip * vec4(clip_pos, 0.0, 1.0); + let prev_clip_pos = (previous_view.clip_from_world * world_pos).xy; + let velocity = (clip_pos - prev_clip_pos) * vec2(0.5, -0.5); // Copied from mesh motion vectors + + return vec4(velocity.x, velocity.y, 0.0, 1.0); +} + +``` + +### bevy_shaders/deferred_lighting + +```rust +#import bevy_pbr::{ + prepass_utils, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, + pbr_functions, + pbr_deferred_functions::pbr_input_from_deferred_gbuffer, + pbr_deferred_types::unpack_unorm3x4_plus_unorm_20_, + lighting, + mesh_view_bindings::deferred_prepass_texture, +} + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION +#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture +#import bevy_pbr::gtao_utils::gtao_multibounce +#endif + +struct FullscreenVertexOutput { + @builtin(position) + position: vec4, + @location(0) + uv: vec2, +}; + +struct PbrDeferredLightingDepthId { + depth_id: u32, // limited to u8 +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding_0: f32, + _webgl2_padding_1: f32, + _webgl2_padding_2: f32, +#endif +} +@group(1) @binding(0) +var depth_id: PbrDeferredLightingDepthId; + +@vertex +fn vertex(@builtin(vertex_index) vertex_index: u32) -> FullscreenVertexOutput { + // See the full screen vertex shader for explanation above for how this works. + let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; + // Depth is stored as unorm, so we are dividing the u8 depth_id by 255.0 here. 
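+    // The uv above spans [0, 2]^2, so `uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0)` produces a
+    // triangle with clip-space corners (-1, 1), (-1, -3) and (3, 1) that covers the whole screen.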
+ let clip_position = vec4(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0), f32(depth_id.depth_id) / 255.0, 1.0); + + return FullscreenVertexOutput(clip_position, uv); +} + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + var frag_coord = vec4(in.position.xy, 0.0, 0.0); + + let deferred_data = textureLoad(deferred_prepass_texture, vec2(frag_coord.xy), 0); + +#ifdef WEBGL2 + frag_coord.z = unpack_unorm3x4_plus_unorm_20_(deferred_data.b).w; +#else +#ifdef DEPTH_PREPASS + frag_coord.z = prepass_utils::prepass_depth(in.position, 0u); +#endif +#endif + + var pbr_input = pbr_input_from_deferred_gbuffer(frag_coord, deferred_data); + var output_color = vec4(0.0); + + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if ((pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { + +#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION + let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; + let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); + pbr_input.diffuse_occlusion = min(pbr_input.diffuse_occlusion, ssao_multibounce); + + // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" + let NdotV = max(dot(pbr_input.N, pbr_input.V), 0.0001); + var perceptual_roughness: f32 = pbr_input.material.perceptual_roughness; + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); + // Use SSAO to estimate the specular occlusion. + // Lagarde and Rousiers 2014, "Moving Frostbite to Physically Based Rendering" + pbr_input.specular_occlusion = saturate(pow(NdotV + ssao, exp2(-16.0 * roughness - 1.0)) - 1.0 + ssao); +#endif // SCREEN_SPACE_AMBIENT_OCCLUSION + + output_color = pbr_functions::apply_pbr_lighting(pbr_input); + } else { + output_color = pbr_input.material.base_color; + } + + output_color = pbr_functions::main_pass_post_lighting_processing(pbr_input, output_color); + + return output_color; +} + + +``` + +### bevy_shaders/skinning + +```rust +#define_import_path bevy_pbr::skinning + +#import bevy_pbr::mesh_types::SkinnedMesh + +#ifdef SKINNED + +@group(1) @binding(1) var joint_matrices: SkinnedMesh; + +// An array of matrices specifying the joint positions from the previous frame. +// +// This is used for motion vector computation. +// +// If this is the first frame, or we're otherwise prevented from using data from +// the previous frame, this is simply the same as `joint_matrices` above. +@group(1) @binding(6) var prev_joint_matrices: SkinnedMesh; + +fn skin_model( + indexes: vec4, + weights: vec4, +) -> mat4x4 { + return weights.x * joint_matrices.data[indexes.x] + + weights.y * joint_matrices.data[indexes.y] + + weights.z * joint_matrices.data[indexes.z] + + weights.w * joint_matrices.data[indexes.w]; +} + +// Returns the skinned position of a vertex with the given weights from the +// previous frame. +// +// This is used for motion vector computation. 
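+//
+// (Typically the motion-vector prepass skins each vertex with both `skin_model` and
+// `skin_prev_model`, then compares the two resulting clip-space positions.)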
+fn skin_prev_model( + indexes: vec4, + weights: vec4, +) -> mat4x4 { + return weights.x * prev_joint_matrices.data[indexes.x] + + weights.y * prev_joint_matrices.data[indexes.y] + + weights.z * prev_joint_matrices.data[indexes.z] + + weights.w * prev_joint_matrices.data[indexes.w]; +} + +fn inverse_transpose_3x3m(in: mat3x3) -> mat3x3 { + let x = cross(in[1], in[2]); + let y = cross(in[2], in[0]); + let z = cross(in[0], in[1]); + let det = dot(in[2], z); + return mat3x3( + x / det, + y / det, + z / det + ); +} + +fn skin_normals( + world_from_local: mat4x4, + normal: vec3, +) -> vec3 { + return normalize( + inverse_transpose_3x3m( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz + ) + ) * normal + ); +} + +#endif + +``` + +### bevy_shaders/visibility_buffer_raster + +```rust +#import bevy_pbr::{ + meshlet_bindings::{ + meshlet_cluster_meshlet_ids, + meshlets, + meshlet_vertex_ids, + meshlet_vertex_data, + meshlet_cluster_instance_ids, + meshlet_instance_uniforms, + meshlet_instance_material_ids, + draw_triangle_buffer, + view, + get_meshlet_index, + unpack_meshlet_vertex, + }, + mesh_functions::mesh_position_local_to_world, +} +#import bevy_render::maths::affine3_to_square + +/// Vertex/fragment shader for rasterizing meshlets into a visibility buffer. + +struct VertexOutput { + @builtin(position) clip_position: vec4, +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT + @location(0) @interpolate(flat) visibility: u32, + @location(1) @interpolate(flat) material_depth: u32, +#endif +#ifdef DEPTH_CLAMP_ORTHO + @location(0) unclamped_clip_depth: f32, +#endif +} + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +struct FragmentOutput { + @location(0) visibility: vec4, + @location(1) material_depth: vec4, +} +#endif + +@vertex +fn vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { + let packed_ids = draw_triangle_buffer[vertex_index / 3u]; + let cluster_id = packed_ids >> 6u; + let triangle_id = extractBits(packed_ids, 0u, 6u); + let index_id = (triangle_id * 3u) + (vertex_index % 3u); + let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; + let meshlet = meshlets[meshlet_id]; + let index = get_meshlet_index(meshlet.start_index_id + index_id); + let vertex_id = meshlet_vertex_ids[meshlet.start_vertex_id + index]; + let vertex = unpack_meshlet_vertex(meshlet_vertex_data[vertex_id]); + let instance_id = meshlet_cluster_instance_ids[cluster_id]; + let instance_uniform = meshlet_instance_uniforms[instance_id]; + + let world_from_local = affine3_to_square(instance_uniform.world_from_local); + let world_position = mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + var clip_position = view.clip_from_world * vec4(world_position.xyz, 1.0); +#ifdef DEPTH_CLAMP_ORTHO + let unclamped_clip_depth = clip_position.z; + clip_position.z = min(clip_position.z, 1.0); +#endif + + return VertexOutput( + clip_position, +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT + packed_ids, + meshlet_instance_material_ids[instance_id], +#endif +#ifdef DEPTH_CLAMP_ORTHO + unclamped_clip_depth, +#endif + ); +} + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +@fragment +fn fragment(vertex_output: VertexOutput) -> FragmentOutput { + return FragmentOutput( + vec4(vertex_output.visibility, 0u, 0u, 0u), + vec4(vertex_output.material_depth, 0u, 0u, 0u), + ); +} +#endif + +#ifdef DEPTH_CLAMP_ORTHO +@fragment +fn fragment(vertex_output: VertexOutput) -> @builtin(frag_depth) f32 { + return vertex_output.unclamped_clip_depth; +} 
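+// The vertex shader clamped `clip_position.z` above, so the true (unclamped) depth is
+// written back here through `frag_depth`.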
+#endif + +``` + +### bevy_shaders/prepass_utils + +```rust +#define_import_path bevy_pbr::prepass_utils + +#import bevy_pbr::mesh_view_bindings as view_bindings + +#ifdef DEPTH_PREPASS +fn prepass_depth(frag_coord: vec4, sample_index: u32) -> f32 { +#ifdef MULTISAMPLED + return textureLoad(view_bindings::depth_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); +#else // MULTISAMPLED + return textureLoad(view_bindings::depth_prepass_texture, vec2(frag_coord.xy), 0); +#endif // MULTISAMPLED +} +#endif // DEPTH_PREPASS + +#ifdef NORMAL_PREPASS +fn prepass_normal(frag_coord: vec4, sample_index: u32) -> vec3 { +#ifdef MULTISAMPLED + let normal_sample = textureLoad(view_bindings::normal_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); +#else + let normal_sample = textureLoad(view_bindings::normal_prepass_texture, vec2(frag_coord.xy), 0); +#endif // MULTISAMPLED + return normalize(normal_sample.xyz * 2.0 - vec3(1.0)); +} +#endif // NORMAL_PREPASS + +#ifdef MOTION_VECTOR_PREPASS +fn prepass_motion_vector(frag_coord: vec4, sample_index: u32) -> vec2 { +#ifdef MULTISAMPLED + let motion_vector_sample = textureLoad(view_bindings::motion_vector_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); +#else + let motion_vector_sample = textureLoad(view_bindings::motion_vector_prepass_texture, vec2(frag_coord.xy), 0); +#endif + return motion_vector_sample.rg; +} +#endif // MOTION_VECTOR_PREPASS + +``` + +### bevy_shaders/pbr_types + +```rust +#define_import_path bevy_pbr::pbr_types + +// Since this is a hot path, try to keep the alignment and size of the struct members in mind. +// You can find the alignment and sizes at . +struct StandardMaterial { + base_color: vec4, + emissive: vec4, + attenuation_color: vec4, + uv_transform: mat3x3, + perceptual_roughness: f32, + metallic: f32, + reflectance: f32, + diffuse_transmission: f32, + specular_transmission: f32, + thickness: f32, + ior: f32, + attenuation_distance: f32, + clearcoat: f32, + clearcoat_perceptual_roughness: f32, + anisotropy_strength: f32, + anisotropy_rotation: vec2, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. + flags: u32, + alpha_cutoff: f32, + parallax_depth_scale: f32, + max_parallax_layer_count: f32, + lightmap_exposure: f32, + max_relief_mapping_search_steps: u32, + /// ID for specifying which deferred lighting pass should be used for rendering this material, if any. + deferred_lighting_pass_id: u32, +}; + +// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +// NOTE: if these flags are updated or changed. Be sure to also update +// deferred_flags_from_mesh_material_flags and mesh_material_flags_from_deferred_flags +// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
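+// The boolean feature flags occupy the low bits (1u through 131072u); the alpha mode is packed
+// into the top three bits and masked with `STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS`.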
+const STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; +const STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; +const STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; +const STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; +const STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; +const STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; +const STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 64u; +const STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 128u; +const STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT: u32 = 256u; +const STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT: u32 = 512u; +const STANDARD_MATERIAL_FLAGS_SPECULAR_TRANSMISSION_TEXTURE_BIT: u32 = 1024u; +const STANDARD_MATERIAL_FLAGS_THICKNESS_TEXTURE_BIT: u32 = 2048u; +const STANDARD_MATERIAL_FLAGS_DIFFUSE_TRANSMISSION_TEXTURE_BIT: u32 = 4096u; +const STANDARD_MATERIAL_FLAGS_ATTENUATION_ENABLED_BIT: u32 = 8192u; +const STANDARD_MATERIAL_FLAGS_CLEARCOAT_TEXTURE_BIT: u32 = 16384u; +const STANDARD_MATERIAL_FLAGS_CLEARCOAT_ROUGHNESS_TEXTURE_BIT: u32 = 32768u; +const STANDARD_MATERIAL_FLAGS_CLEARCOAT_NORMAL_TEXTURE_BIT: u32 = 65536u; +const STANDARD_MATERIAL_FLAGS_ANISOTROPY_TEXTURE_BIT: u32 = 131072u; +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS: u32 = 3758096384u; // (0b111u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 0u; // (0u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 536870912u; // (1u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 1073741824u; // (2u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED: u32 = 1610612736u; // (3u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD: u32 = 2147483648u; // (4u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MULTIPLY: u32 = 2684354560u; // (5u32 << 29) +const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE: u32 = 3221225472u; // (6u32 << 29) +// ↑ To calculate/verify the values above, use the following playground: +// https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=7792f8dd6fc6a8d4d0b6b1776898a7f4 + + +// Creates a StandardMaterial with default values +fn standard_material_new() -> StandardMaterial { + var material: StandardMaterial; + + // NOTE: Keep in-sync with src/pbr_material.rs! + material.base_color = vec4(1.0, 1.0, 1.0, 1.0); + material.emissive = vec4(0.0, 0.0, 0.0, 1.0); + material.perceptual_roughness = 0.5; + material.metallic = 0.00; + material.reflectance = 0.5; + material.diffuse_transmission = 0.0; + material.specular_transmission = 0.0; + material.thickness = 0.0; + material.ior = 1.5; + material.attenuation_distance = 1.0; + material.attenuation_color = vec4(1.0, 1.0, 1.0, 1.0); + material.clearcoat = 0.0; + material.clearcoat_perceptual_roughness = 0.0; + material.flags = STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE; + material.alpha_cutoff = 0.5; + material.parallax_depth_scale = 0.1; + material.max_parallax_layer_count = 16.0; + material.max_relief_mapping_search_steps = 5u; + material.deferred_lighting_pass_id = 1u; + // scale 1, translation 0, rotation 0 + material.uv_transform = mat3x3(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0); + + return material; +} + +struct PbrInput { + material: StandardMaterial, + // Note: this gets monochromized upon deferred PbrInput reconstruction. + diffuse_occlusion: vec3, + // Note: this is 1.0 (entirely unoccluded) when SSAO and SSR are off. 
+ specular_occlusion: f32, + frag_coord: vec4, + world_position: vec4, + // Normalized world normal used for shadow mapping as normal-mapping is not used for shadow + // mapping + world_normal: vec3, + // Normalized normal-mapped world normal used for lighting + N: vec3, + // Normalized view vector in world space, pointing from the fragment world position toward the + // view world position + V: vec3, + lightmap_light: vec3, + clearcoat_N: vec3, + anisotropy_strength: f32, + // These two aren't specific to anisotropy, but we only fill them in if + // we're doing anisotropy, so they're prefixed with `anisotropy_`. + anisotropy_T: vec3, + anisotropy_B: vec3, + is_orthographic: bool, + flags: u32, +}; + +// Creates a PbrInput with default values +fn pbr_input_new() -> PbrInput { + var pbr_input: PbrInput; + + pbr_input.material = standard_material_new(); + pbr_input.diffuse_occlusion = vec3(1.0); + // If SSAO is enabled, then this gets overwritten with proper specular occlusion. If its not, then we get specular environment map unoccluded (we have no data with which to occlude it with). + pbr_input.specular_occlusion = 1.0; + + pbr_input.frag_coord = vec4(0.0, 0.0, 0.0, 1.0); + pbr_input.world_position = vec4(0.0, 0.0, 0.0, 1.0); + pbr_input.world_normal = vec3(0.0, 0.0, 1.0); + + pbr_input.is_orthographic = false; + + pbr_input.N = vec3(0.0, 0.0, 1.0); + pbr_input.V = vec3(1.0, 0.0, 0.0); + + pbr_input.clearcoat_N = vec3(0.0); + pbr_input.anisotropy_T = vec3(0.0); + pbr_input.anisotropy_B = vec3(0.0); + + pbr_input.lightmap_light = vec3(0.0); + + pbr_input.flags = 0u; + + return pbr_input; +} + +``` + +### bevy_shaders/post_processing + +```rust +// This shader computes the chromatic aberration effect + +// Since post processing is a fullscreen effect, we use the fullscreen vertex shader provided by bevy. +// This will import a vertex shader that renders a single fullscreen triangle. +// +// A fullscreen triangle is a single triangle that covers the entire screen. +// The box in the top left in that diagram is the screen. The 4 x are the corner of the screen +// +// Y axis +// 1 | x-----x...... +// 0 | | s | . ´ +// -1 | x_____x´ +// -2 | : .´ +// -3 | :´ +// +--------------- X axis +// -1 0 1 2 3 +// +// As you can see, the triangle ends up bigger than the screen. +// +// You don't need to worry about this too much since bevy will compute the correct UVs for you. +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(0) var screen_texture: texture_2d; +@group(0) @binding(1) var texture_sampler: sampler; +struct PostProcessSettings { + intensity: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: vec3 +#endif +} +@group(0) @binding(2) var settings: PostProcessSettings; + +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Chromatic aberration strength + let offset_strength = settings.intensity; + + // Sample each color channel with an arbitrary shift + return vec4( + textureSample(screen_texture, texture_sampler, in.uv + vec2(offset_strength, -offset_strength)).r, + textureSample(screen_texture, texture_sampler, in.uv + vec2(-offset_strength, 0.0)).g, + textureSample(screen_texture, texture_sampler, in.uv + vec2(0.0, offset_strength)).b, + 1.0 + ); +} + + +``` + +### bevy_shaders/utils + +```rust +#define_import_path bevy_pbr::utils + +#import bevy_pbr::rgb9e5 + +// Generates a random u32 in range [0, u32::MAX]. 
+// +// `state` is a mutable reference to a u32 used as the seed. +// +// Values are generated via "white noise", with no correlation between values. +// In shaders, you often want spatial and/or temporal correlation. Use a different RNG method for these use cases. +// +// https://www.pcg-random.org +// https://www.reedbeta.com/blog/hash-functions-for-gpu-rendering +fn rand_u(state: ptr) -> u32 { + *state = *state * 747796405u + 2891336453u; + let word = ((*state >> ((*state >> 28u) + 4u)) ^ *state) * 277803737u; + return (word >> 22u) ^ word; +} + +// Generates a random f32 in range [0, 1.0]. +fn rand_f(state: ptr) -> f32 { + *state = *state * 747796405u + 2891336453u; + let word = ((*state >> ((*state >> 28u) + 4u)) ^ *state) * 277803737u; + return f32((word >> 22u) ^ word) * bitcast(0x2f800004u); +} + +// Generates a random vec2 where each value is in range [0, 1.0]. +fn rand_vec2f(state: ptr) -> vec2 { + return vec2(rand_f(state), rand_f(state)); +} + +// Generates a random u32 in range [0, n). +fn rand_range_u(n: u32, state: ptr) -> u32 { + return rand_u(state) % n; +} + +// returns the (0-1, 0-1) position within the given viewport for the current buffer coords . +// buffer coords can be obtained from `@builtin(position).xy`. +// the view uniform struct contains the current camera viewport in `view.viewport`. +// topleft = 0,0 +fn coords_to_viewport_uv(position: vec2, viewport: vec4) -> vec2 { + return (position - viewport.xy) / viewport.zw; +} + +// https://jcgt.org/published/0003/02/01/paper.pdf + +// For encoding normals or unit direction vectors as octahedral coordinates. +fn octahedral_encode(v: vec3) -> vec2 { + var n = v / (abs(v.x) + abs(v.y) + abs(v.z)); + let octahedral_wrap = (1.0 - abs(n.yx)) * select(vec2(-1.0), vec2(1.0), n.xy > vec2f(0.0)); + let n_xy = select(octahedral_wrap, n.xy, n.z >= 0.0); + return n_xy * 0.5 + 0.5; +} + +// For decoding normals or unit direction vectors from octahedral coordinates. +fn octahedral_decode(v: vec2) -> vec3 { + let f = v * 2.0 - 1.0; + var n = vec3(f.xy, 1.0 - abs(f.x) - abs(f.y)); + let t = saturate(-n.z); + let w = select(vec2(t), vec2(-t), n.xy >= vec2(0.0)); + n = vec3(n.xy + w, n.z); + return normalize(n); +} + +// https://blog.demofox.org/2022/01/01/interleaved-gradient-noise-a-different-kind-of-low-discrepancy-sequence +fn interleaved_gradient_noise(pixel_coordinates: vec2, frame: u32) -> f32 { + let xy = pixel_coordinates + 5.588238 * f32(frame % 64u); + return fract(52.9829189 * fract(0.06711056 * xy.x + 0.00583715 * xy.y)); +} + +// https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) +// TODO: Use an array here instead of a bunch of constants, once arrays work properly under DX12. 
+// NOTE: The names have a final underscore to avoid the following error: +// `Composable module identifiers must not require substitution according to naga writeback rules` +const SPIRAL_OFFSET_0_ = vec2(-0.7071, 0.7071); +const SPIRAL_OFFSET_1_ = vec2(-0.0000, -0.8750); +const SPIRAL_OFFSET_2_ = vec2( 0.5303, 0.5303); +const SPIRAL_OFFSET_3_ = vec2(-0.6250, -0.0000); +const SPIRAL_OFFSET_4_ = vec2( 0.3536, -0.3536); +const SPIRAL_OFFSET_5_ = vec2(-0.0000, 0.3750); +const SPIRAL_OFFSET_6_ = vec2(-0.1768, -0.1768); +const SPIRAL_OFFSET_7_ = vec2( 0.1250, 0.0000); + +``` + +### bevy_shaders/gpu_readback + +```rust +// This shader is used for the gpu_readback example +// The actual work it does is not important for the example + +// This is the data that lives in the gpu only buffer +@group(0) @binding(0) var data: array; + +@compute @workgroup_size(1) +fn main(@builtin(global_invocation_id) global_id: vec3) { + // We use the global_id to index the array to make sure we don't + // access data used in another workgroup + data[global_id.x] += 1u; +} + +``` + +### bevy_shaders/robust_contrast_adaptive_sharpening + +```rust +// Copyright (c) 2022 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +struct CASUniforms { + sharpness: f32, +}; + +@group(0) @binding(0) var screenTexture: texture_2d; +@group(0) @binding(1) var samp: sampler; +@group(0) @binding(2) var uniforms: CASUniforms; + +// This is set at the limit of providing unnatural results for sharpening. +const FSR_RCAS_LIMIT = 0.1875; +// -4.0 instead of -1.0 to avoid issues with MSAA. +const peakC = vec2(10.0, -40.0); + +// Robust Contrast Adaptive Sharpening (RCAS) +// Based on the following implementation: +// https://github.com/GPUOpen-Effects/FidelityFX-FSR2/blob/ea97a113b0f9cadf519fbcff315cc539915a3acd/src/ffx-fsr2-api/shaders/ffx_fsr1.h#L672 +// RCAS is based on the following logic. +// RCAS uses a 5 tap filter in a cross pattern (same as CAS), +// W b +// W 1 W for taps d e f +// W h +// Where 'W' is the negative lobe weight. +// output = (W*(b+d+f+h)+e)/(4*W+1) +// RCAS solves for 'W' by seeing where the signal might clip out of the {0 to 1} input range, +// 0 == (W*(b+d+f+h)+e)/(4*W+1) -> W = -e/(b+d+f+h) +// 1 == (W*(b+d+f+h)+e)/(4*W+1) -> W = (1-e)/(b+d+f+h-4) +// Then chooses the 'W' which results in no clipping, limits 'W', and multiplies by the 'sharp' amount. 
+// This solution above has issues with MSAA input as the steps along the gradient cause edge detection issues. +// So RCAS uses 4x the maximum and 4x the minimum (depending on equation)in place of the individual taps. +// As well as switching from 'e' to either the minimum or maximum (depending on side), to help in energy conservation. +// This stabilizes RCAS. +// RCAS does a simple highpass which is normalized against the local contrast then shaped, +// 0.25 +// 0.25 -1 0.25 +// 0.25 +// This is used as a noise detection filter, to reduce the effect of RCAS on grain, and focus on real edges. +// The CAS node runs after tonemapping, so the input will be in the range of 0 to 1. +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Algorithm uses minimal 3x3 pixel neighborhood. + // b + // d e f + // h + let b = textureSample(screenTexture, samp, in.uv, vec2(0, -1)).rgb; + let d = textureSample(screenTexture, samp, in.uv, vec2(-1, 0)).rgb; + // We need the alpha value of the pixel we're working on for the output + let e = textureSample(screenTexture, samp, in.uv).rgbw; + let f = textureSample(screenTexture, samp, in.uv, vec2(1, 0)).rgb; + let h = textureSample(screenTexture, samp, in.uv, vec2(0, 1)).rgb; + // Min and max of ring. + let mn4 = min(min(b, d), min(f, h)); + let mx4 = max(max(b, d), max(f, h)); + // Limiters + // 4.0 to avoid issues with MSAA. + let hitMin = mn4 / (4.0 * mx4); + let hitMax = (peakC.x - mx4) / (peakC.y + 4.0 * mn4); + let lobeRGB = max(-hitMin, hitMax); + var lobe = max(-FSR_RCAS_LIMIT, min(0.0, max(lobeRGB.r, max(lobeRGB.g, lobeRGB.b)))) * uniforms.sharpness; +#ifdef RCAS_DENOISE + // Luma times 2. + let bL = b.b * 0.5 + (b.r * 0.5 + b.g); + let dL = d.b * 0.5 + (d.r * 0.5 + d.g); + let eL = e.b * 0.5 + (e.r * 0.5 + e.g); + let fL = f.b * 0.5 + (f.r * 0.5 + f.g); + let hL = h.b * 0.5 + (h.r * 0.5 + h.g); + // Noise detection. + var noise = 0.25 * bL + 0.25 * dL + 0.25 * fL + 0.25 * hL - eL;; + noise = saturate(abs(noise) / (max(max(bL, dL), max(fL, hL)) - min(min(bL, dL), min(fL, hL)))); + noise = 1.0 - 0.5 * noise; + // Apply noise removal. + lobe *= noise; +#endif + return vec4((lobe * b + lobe * d + lobe * f + lobe * h + e.rgb) / (4.0 * lobe + 1.0), e.w); +} + +``` + +### bevy_shaders/lines + +```rust +// TODO use common view binding +#import bevy_render::view::View + +@group(0) @binding(0) var view: View; + + +struct LineGizmoUniform { + line_width: f32, + depth_bias: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _padding: vec2, +#endif +} + +@group(1) @binding(0) var line_gizmo: LineGizmoUniform; + +struct VertexInput { + @location(0) position_a: vec3, + @location(1) position_b: vec3, + @location(2) color_a: vec4, + @location(3) color_b: vec4, + @builtin(vertex_index) index: u32, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, + @location(1) uv: f32, +}; + +const EPSILON: f32 = 4.88e-04; + +@vertex +fn vertex(vertex: VertexInput) -> VertexOutput { + var positions = array, 6>( + vec2(-0.5, 0.), + vec2(-0.5, 1.), + vec2(0.5, 1.), + vec2(-0.5, 0.), + vec2(0.5, 1.), + vec2(0.5, 0.) + ); + let position = positions[vertex.index]; + + // algorithm based on https://wwwtyro.net/2019/11/18/instanced-lines.html + var clip_a = view.clip_from_world * vec4(vertex.position_a, 1.); + var clip_b = view.clip_from_world * vec4(vertex.position_b, 1.); + + // Manual near plane clipping to avoid errors when doing the perspective divide inside this shader. 
+ clip_a = clip_near_plane(clip_a, clip_b); + clip_b = clip_near_plane(clip_b, clip_a); + let clip = mix(clip_a, clip_b, position.y); + + let resolution = view.viewport.zw; + let screen_a = resolution * (0.5 * clip_a.xy / clip_a.w + 0.5); + let screen_b = resolution * (0.5 * clip_b.xy / clip_b.w + 0.5); + + let y_basis = normalize(screen_b - screen_a); + let x_basis = vec2(-y_basis.y, y_basis.x); + + var color = mix(vertex.color_a, vertex.color_b, position.y); + + var line_width = line_gizmo.line_width; + var alpha = 1.; + + var uv: f32; +#ifdef PERSPECTIVE + line_width /= clip.w; + + // get height of near clipping plane in world space + let pos0 = view.view_from_clip * vec4(0, -1, 0, 1); // Bottom of the screen + let pos1 = view.view_from_clip * vec4(0, 1, 0, 1); // Top of the screen + let near_clipping_plane_height = length(pos0.xyz - pos1.xyz); + + // We can't use vertex.position_X because we may have changed the clip positions with clip_near_plane + let position_a = view.world_from_clip * clip_a; + let position_b = view.world_from_clip * clip_b; + let world_distance = length(position_a.xyz - position_b.xyz); + + // Offset to compensate for moved clip positions. If removed dots on lines will slide when position a is ofscreen. + let clipped_offset = length(position_a.xyz - vertex.position_a); + + uv = (clipped_offset + position.y * world_distance) * resolution.y / near_clipping_plane_height / line_gizmo.line_width; +#else + // Get the distance of b to the camera along camera axes + let camera_b = view.view_from_clip * clip_b; + + // This differentiates between orthographic and perspective cameras. + // For orthographic cameras no depth adaptment (depth_adaptment = 1) is needed. + var depth_adaptment: f32; + if (clip_b.w == 1.0) { + depth_adaptment = 1.0; + } + else { + depth_adaptment = -camera_b.z; + } + uv = position.y * depth_adaptment * length(screen_b - screen_a) / line_gizmo.line_width; +#endif + + // Line thinness fade from https://acegikmo.com/shapes/docs/#anti-aliasing + if line_width > 0.0 && line_width < 1. { + color.a *= line_width; + line_width = 1.; + } + + let x_offset = line_width * position.x * x_basis; + let screen = mix(screen_a, screen_b, position.y) + x_offset; + + var depth: f32; + if line_gizmo.depth_bias >= 0. { + depth = clip.z * (1. - line_gizmo.depth_bias); + } else { + // depth * (clip.w / depth)^-depth_bias. So that when -depth_bias is 1.0, this is equal to clip.w + // and when equal to 0.0, it is exactly equal to depth. + // the epsilon is here to prevent the depth from exceeding clip.w when -depth_bias = 1.0 + // clip.w represents the near plane in homogeneous clip space in bevy, having a depth + // of this value means nothing can be in front of this + // The reason this uses an exponential function is that it makes it much easier for the + // user to chose a value that is convenient for them + depth = clip.z * exp2(-line_gizmo.depth_bias * log2(clip.w / clip.z - EPSILON)); + } - let edgeVertical = abs(-2.0 * lumaUp + lumaUpCorners) + - abs(-2.0 * lumaCenter + lumaLeftRight) * 2.0 + - abs(-2.0 * lumaDown + lumaDownCorners); + var clip_position = vec4(clip.w * ((2. * screen) / resolution - 1.), depth, clip.w); - // Is the local edge horizontal or vertical ? - let isHorizontal = (edgeHorizontal >= edgeVertical); + return VertexOutput(clip_position, color, uv); +} - // Choose the step size (one pixel) accordingly. 
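+// Illustrative note on the depth bias branch in `vertex` above (derived from the formula):
+// with line_gizmo.depth_bias = -1.0 the exponent becomes log2(clip.w / clip.z - EPSILON), so
+// depth ≈ clip.z * (clip.w / clip.z) = clip.w and the line sits at the near plane; as
+// depth_bias approaches 0.0 the factor approaches exp2(0.0) = 1.0 and depth stays at clip.z,
+// matching the non-negative branch.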
- var stepLength = select(inverseScreenSize.x, inverseScreenSize.y, isHorizontal); +fn clip_near_plane(a: vec4, b: vec4) -> vec4 { + // Move a if a is behind the near plane and b is in front. + if a.z > a.w && b.z <= b.w { + // Interpolate a towards b until it's at the near plane. + let distance_a = a.z - a.w; + let distance_b = b.z - b.w; + // Add an epsilon to the interpolator to ensure that the point is + // not just behind the clip plane due to floating-point imprecision. + let t = distance_a / (distance_a - distance_b) + EPSILON; + return mix(a, b, t); + } + return a; +} - // Select the two neighboring texels lumas in the opposite direction to the local edge. - var luma1 = select(lumaLeft, lumaDown, isHorizontal); - var luma2 = select(lumaRight, lumaUp, isHorizontal); +struct FragmentInput { + @builtin(position) position: vec4, + @location(0) color: vec4, + @location(1) uv: f32, +}; - // Compute gradients in this direction. - let gradient1 = luma1 - lumaCenter; - let gradient2 = luma2 - lumaCenter; +struct FragmentOutput { + @location(0) color: vec4, +}; - // Which direction is the steepest ? - let is1Steepest = abs(gradient1) >= abs(gradient2); +@fragment +fn fragment_solid(in: FragmentInput) -> FragmentOutput { + return FragmentOutput(in.color); +} +@fragment +fn fragment_dotted(in: FragmentInput) -> FragmentOutput { + var alpha: f32; +#ifdef PERSPECTIVE + alpha = 1 - floor(in.uv % 2.0); +#else + alpha = 1 - floor((in.uv * in.position.w) % 2.0); +#endif + + return FragmentOutput(vec4(in.color.xyz, in.color.w * alpha)); +} - // Gradient in the corresponding direction, normalized. - let gradientScaled = 0.25 * max(abs(gradient1), abs(gradient2)); +``` - // Average luma in the correct direction. - var lumaLocalAverage = 0.0; - if (is1Steepest) { - // Switch the direction - stepLength = -stepLength; - lumaLocalAverage = 0.5 * (luma1 + lumaCenter); - } else { - lumaLocalAverage = 0.5 * (luma2 + lumaCenter); - } +### bevy_shaders/pbr_deferred_types - // Shift UV in the correct direction by half a pixel. - // Compute offset (for each iteration step) in the right direction. - var currentUv = texCoord; - var offset = vec2(0.0, 0.0); - if (isHorizontal) { - currentUv.y = currentUv.y + stepLength * 0.5; - offset.x = inverseScreenSize.x; - } else { - currentUv.x = currentUv.x + stepLength * 0.5; - offset.y = inverseScreenSize.y; - } +```rust +#define_import_path bevy_pbr::pbr_deferred_types - // Compute UVs to explore on each side of the edge, orthogonally. The QUALITY allows us to step faster. - var uv1 = currentUv - offset; // * QUALITY(0); // (quality 0 is 1.0) - var uv2 = currentUv + offset; // * QUALITY(0); // (quality 0 is 1.0) +#import bevy_pbr::{ + mesh_types::MESH_FLAGS_SHADOW_RECEIVER_BIT, + pbr_types::{STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, +} - // Read the lumas at both current extremities of the exploration segment, and compute the delta wrt to the local average luma. - var lumaEnd1 = rgb2luma(textureSampleLevel(screenTexture, samp, uv1, 0.0).rgb); - var lumaEnd2 = rgb2luma(textureSampleLevel(screenTexture, samp, uv2, 0.0).rgb); - lumaEnd1 = lumaEnd1 - lumaLocalAverage; - lumaEnd2 = lumaEnd2 - lumaLocalAverage; +// Maximum of 8 bits available +const DEFERRED_FLAGS_UNLIT_BIT: u32 = 1u; +const DEFERRED_FLAGS_FOG_ENABLED_BIT: u32 = 2u; +const DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 4u; - // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. 
- var reached1 = abs(lumaEnd1) >= gradientScaled; - var reached2 = abs(lumaEnd2) >= gradientScaled; - var reachedBoth = reached1 && reached2; +fn deferred_flags_from_mesh_material_flags(mesh_flags: u32, mat_flags: u32) -> u32 { + var flags = 0u; + flags |= u32((mesh_flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) * DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT; + flags |= u32((mat_flags & STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) * DEFERRED_FLAGS_FOG_ENABLED_BIT; + flags |= u32((mat_flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) * DEFERRED_FLAGS_UNLIT_BIT; + return flags; +} - // If the side is not reached, we continue to explore in this direction. - uv1 = select(uv1 - offset, uv1, reached1); // * QUALITY(1); // (quality 1 is 1.0) - uv2 = select(uv2 - offset, uv2, reached2); // * QUALITY(1); // (quality 1 is 1.0) +fn mesh_material_flags_from_deferred_flags(deferred_flags: u32) -> vec2 { + var mat_flags = 0u; + var mesh_flags = 0u; + mesh_flags |= u32((deferred_flags & DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) * MESH_FLAGS_SHADOW_RECEIVER_BIT; + mat_flags |= u32((deferred_flags & DEFERRED_FLAGS_FOG_ENABLED_BIT) != 0u) * STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT; + mat_flags |= u32((deferred_flags & DEFERRED_FLAGS_UNLIT_BIT) != 0u) * STANDARD_MATERIAL_FLAGS_UNLIT_BIT; + return vec2(mesh_flags, mat_flags); +} - // If both sides have not been reached, continue to explore. - if (!reachedBoth) { - for (var i: i32 = 2; i < ITERATIONS; i = i + 1) { - // If needed, read luma in 1st direction, compute delta. - if (!reached1) { - lumaEnd1 = rgb2luma(textureSampleLevel(screenTexture, samp, uv1, 0.0).rgb); - lumaEnd1 = lumaEnd1 - lumaLocalAverage; - } - // If needed, read luma in opposite direction, compute delta. - if (!reached2) { - lumaEnd2 = rgb2luma(textureSampleLevel(screenTexture, samp, uv2, 0.0).rgb); - lumaEnd2 = lumaEnd2 - lumaLocalAverage; - } - // If the luma deltas at the current extremities is larger than the local gradient, we have reached the side of the edge. - reached1 = abs(lumaEnd1) >= gradientScaled; - reached2 = abs(lumaEnd2) >= gradientScaled; - reachedBoth = reached1 && reached2; +const U12MAXF = 4095.0; +const U16MAXF = 65535.0; +const U20MAXF = 1048575.0; - // If the side is not reached, we continue to explore in this direction, with a variable quality. - if (!reached1) { - uv1 = uv1 - offset * QUALITY(i); - } - if (!reached2) { - uv2 = uv2 + offset * QUALITY(i); - } +// Storing normals as oct24. +// Flags are stored in the remaining 8 bits. +// https://jcgt.org/published/0003/02/01/paper.pdf +// Could possibly go down to oct20 if the space is needed. - // If both sides have been reached, stop the exploration. - if (reachedBoth) { - break; - } - } - } +fn pack_24bit_normal_and_flags(octahedral_normal: vec2, flags: u32) -> u32 { + let unorm1 = u32(saturate(octahedral_normal.x) * U12MAXF + 0.5); + let unorm2 = u32(saturate(octahedral_normal.y) * U12MAXF + 0.5); + return (unorm1 & 0xFFFu) | ((unorm2 & 0xFFFu) << 12u) | ((flags & 0xFFu) << 24u); +} - // Compute the distances to each side edge of the edge (!). - var distance1 = select(texCoord.y - uv1.y, texCoord.x - uv1.x, isHorizontal); - var distance2 = select(uv2.y - texCoord.y, uv2.x - texCoord.x, isHorizontal); +fn unpack_24bit_normal(packed: u32) -> vec2 { + let unorm1 = packed & 0xFFFu; + let unorm2 = (packed >> 12u) & 0xFFFu; + return vec2(f32(unorm1) / U12MAXF, f32(unorm2) / U12MAXF); +} - // In which direction is the side of the edge closer ? 
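+// Worked example (illustrative) for the pack/unpack pair above: an octahedral normal of
+// (0.5, 0.25) with flags = 3u stores u32(0.5 * 4095.0 + 0.5) = 2048u and
+// u32(0.25 * 4095.0 + 0.5) = 1024u, giving 0x800u | (0x400u << 12u) | (3u << 24u) = 0x03400800u;
+// unpacking returns roughly (0.50012, 0.25006) for the normal and 3u for the flags.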
- let isDirection1 = distance1 < distance2; - let distanceFinal = min(distance1, distance2); +fn unpack_flags(packed: u32) -> u32 { + return (packed >> 24u) & 0xFFu; +} - // Thickness of the edge. - let edgeThickness = (distance1 + distance2); +// The builtin one didn't work in webgl. +// "'unpackUnorm4x8' : no matching overloaded function found" +// https://github.com/gfx-rs/naga/issues/2006 +fn unpack_unorm4x8_(v: u32) -> vec4 { + return vec4( + f32(v & 0xFFu), + f32((v >> 8u) & 0xFFu), + f32((v >> 16u) & 0xFFu), + f32((v >> 24u) & 0xFFu) + ) / 255.0; +} - // Is the luma at center smaller than the local average ? - let isLumaCenterSmaller = lumaCenter < lumaLocalAverage; +// 'packUnorm4x8' : no matching overloaded function found +// https://github.com/gfx-rs/naga/issues/2006 +fn pack_unorm4x8_(values: vec4) -> u32 { + let v = vec4(saturate(values) * 255.0 + 0.5); + return (v.w << 24u) | (v.z << 16u) | (v.y << 8u) | v.x; +} - // If the luma at center is smaller than at its neighbor, the delta luma at each end should be positive (same variation). - let correctVariation1 = (lumaEnd1 < 0.0) != isLumaCenterSmaller; - let correctVariation2 = (lumaEnd2 < 0.0) != isLumaCenterSmaller; +// Pack 3x 4bit unorm + 1x 20bit +fn pack_unorm3x4_plus_unorm_20_(v: vec4) -> u32 { + let sm = vec3(saturate(v.xyz) * 15.0 + 0.5); + let bg = u32(saturate(v.w) * U20MAXF + 0.5); + return (bg << 12u) | (sm.z << 8u) | (sm.y << 4u) | sm.x; +} - // Only keep the result in the direction of the closer side of the edge. - var correctVariation = select(correctVariation2, correctVariation1, isDirection1); +// Unpack 3x 4bit unorm + 1x 20bit +fn unpack_unorm3x4_plus_unorm_20_(v: u32) -> vec4 { + return vec4( + f32(v & 0xfu) / 15.0, + f32((v >> 4u) & 0xFu) / 15.0, + f32((v >> 8u) & 0xFu) / 15.0, + f32((v >> 12u) & 0xFFFFFFu) / U20MAXF, + ); +} - // UV offset: read in the direction of the closest side of the edge. - let pixelOffset = - distanceFinal / edgeThickness + 0.5; +``` - // If the luma variation is incorrect, do not offset. - var finalOffset = select(0.0, pixelOffset, correctVariation); +### bevy_shaders/mesh_types - // Sub-pixel shifting - // Full weighted average of the luma over the 3x3 neighborhood. - let lumaAverage = (1.0 / 12.0) * (2.0 * (lumaDownUp + lumaLeftRight) + lumaLeftCorners + lumaRightCorners); - // Ratio of the delta between the global average and the center luma, over the luma range in the 3x3 neighborhood. - let subPixelOffset1 = clamp(abs(lumaAverage - lumaCenter) / lumaRange, 0.0, 1.0); - let subPixelOffset2 = (-2.0 * subPixelOffset1 + 3.0) * subPixelOffset1 * subPixelOffset1; - // Compute a sub-pixel offset based on this delta. - let subPixelOffsetFinal = subPixelOffset2 * subPixelOffset2 * SUBPIXEL_QUALITY; +```rust +#define_import_path bevy_pbr::mesh_types - // Pick the biggest of the two offsets. - finalOffset = max(finalOffset, subPixelOffsetFinal); +struct Mesh { + // Affine 4x3 matrices transposed to 3x4 + // Use bevy_render::maths::affine3_to_square to unpack + world_from_local: mat3x4, + previous_world_from_local: mat3x4, + // 3x3 matrix packed in mat2x4 and f32 as: + // [0].xyz, [1].x, + // [1].yz, [2].xy + // [2].z + // Use bevy_pbr::mesh_functions::mat2x4_f32_to_mat3x3_unpack to unpack + local_from_world_transpose_a: mat2x4, + local_from_world_transpose_b: f32, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. + flags: u32, + lightmap_uv_rect: vec2, +}; - // Compute the final UV coordinates. 
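+// Illustrative note on the layout above (an interpretation of the comments, not normative):
+// each column of the mat3x4 holds one row of the usual 4x4 model matrix, so the translation
+// ends up in the .w components, and `affine3_to_square` presumably restores the square matrix
+// by treating the three columns as rows and adding a final (0, 0, 0, 1) row.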
- var finalUv = texCoord; - if (isHorizontal) { - finalUv.y = finalUv.y + finalOffset * stepLength; - } else { - finalUv.x = finalUv.x + finalOffset * stepLength; - } +#ifdef SKINNED +struct SkinnedMesh { + data: array, 256u>, +}; +#endif - // Read the color at the new UV coordinates, and use it. - var finalColor = textureSampleLevel(screenTexture, samp, finalUv, 0.0).rgb; - return vec4(finalColor, centerSample.a); -} +#ifdef MORPH_TARGETS +struct MorphWeights { + weights: array, 16u>, // 16 = 64 / 4 (64 = MAX_MORPH_WEIGHTS) +}; +#endif + +// [2^0, 2^16) +const MESH_FLAGS_VISIBILITY_RANGE_INDEX_BITS: u32 = 65535u; +// 2^29 +const MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 536870912u; +// 2^30 +const MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT: u32 = 1073741824u; +// 2^31 - if the flag is set, the sign is positive, else it is negative +const MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT: u32 = 2147483648u; ``` -### crates/bevy_core_pipeline/src/skybox/skybox +### bevy_shaders/custom_material_2d ```rust -#import bevy_render::view::View -#import bevy_pbr::utils::coords_to_viewport_uv +#import bevy_sprite::mesh2d_vertex_output::VertexOutput +// we can import items from shader modules in the assets folder with a quoted path +#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER -struct SkyboxUniforms { - brightness: f32, -#ifdef SIXTEEN_BYTE_ALIGNMENT - _wasm_padding_8b: u32, - _wasm_padding_12b: u32, - _wasm_padding_16b: u32, -#endif +@group(2) @binding(0) var material_color: vec4; +@group(2) @binding(1) var base_color_texture: texture_2d; +@group(2) @binding(2) var base_color_sampler: sampler; + +@fragment +fn fragment(mesh: VertexOutput) -> @location(0) vec4 { + return material_color * textureSample(base_color_texture, base_color_sampler, mesh.uv) * COLOR_MULTIPLIER; } -@group(0) @binding(0) var skybox: texture_cube; -@group(0) @binding(1) var skybox_sampler: sampler; -@group(0) @binding(2) var view: View; -@group(0) @binding(3) var uniforms: SkyboxUniforms; +``` -fn coords_to_ray_direction(position: vec2, viewport: vec4) -> vec3 { - // Using world positions of the fragment and camera to calculate a ray direction - // breaks down at large translations. This code only needs to know the ray direction. - // The ray direction is along the direction from the camera to the fragment position. - // In view space, the camera is at the origin, so the view space ray direction is - // along the direction of the fragment position - (0,0,0) which is just the - // fragment position. - // Use the position on the near clipping plane to avoid -inf world position - // because the far plane of an infinite reverse projection is at infinity. - let view_position_homogeneous = view.inverse_projection * vec4( - coords_to_viewport_uv(position, viewport) * vec2(2.0, -2.0) + vec2(-1.0, 1.0), - 1.0, - 1.0, - ); - let view_ray_direction = view_position_homogeneous.xyz / view_position_homogeneous.w; - // Transforming the view space ray direction by the view matrix, transforms the - // direction to world space. Note that the w element is set to 0.0, as this is a - // vector direction, not a position, That causes the matrix multiplication to ignore - // the translations from the view matrix. - let ray_direction = (view.view * vec4(view_ray_direction, 0.0)).xyz; +### bevy_shaders/shader_defs - return normalize(ray_direction); -} +```rust +#import bevy_pbr::forward_io::VertexOutput -struct VertexOutput { - @builtin(position) position: vec4, +struct CustomMaterial { + color: vec4, }; -// 3 | 2. -// 2 | : `. -// 1 | x-----x. 
-// 0 | | s | `. -// -1 | 0-----x.....1 -// +--------------- -// -1 0 1 2 3 -// -// The axes are clip-space x and y. The region marked s is the visible region. -// The digits in the corners of the right-angled triangle are the vertex -// indices. -@vertex -fn skybox_vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { - // See the explanation above for how this works. - let clip_position = vec4( - f32(vertex_index & 1u), - f32((vertex_index >> 1u) & 1u), - 0.25, - 0.5 - ) * 4.0 - vec4(1.0); - - return VertexOutput(clip_position); -} +@group(2) @binding(0) var material: CustomMaterial; @fragment -fn skybox_fragment(in: VertexOutput) -> @location(0) vec4 { - let ray_direction = coords_to_ray_direction(in.position.xy, view.viewport); - - // Cube maps are left-handed so we negate the z coordinate. - let out = textureSample(skybox, skybox_sampler, ray_direction * vec3(1.0, 1.0, -1.0)); - return vec4(out.rgb * uniforms.brightness, out.a); +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifdef IS_RED + return vec4(1.0, 0.0, 0.0, 1.0); +#else + return material.color; +#endif } ``` -### crates/bevy_core_pipeline/src/taa/taa +### bevy_shaders/screenshot ```rust -// References: -// https://www.elopezr.com/temporal-aa-and-the-quest-for-the-holy-trail -// http://behindthepixels.io/assets/files/TemporalAA.pdf -// http://leiy.cc/publications/TAA/TAA_EG2020_Talk.pdf -// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING - -// Controls how much to blend between the current and past samples -// Lower numbers = less of the current sample and more of the past sample = more smoothing -// Values chosen empirically -const DEFAULT_HISTORY_BLEND_RATE: f32 = 0.1; // Default blend rate to use when no confidence in history -const MIN_HISTORY_BLEND_RATE: f32 = 0.015; // Minimum blend rate allowed, to ensure at least some of the current sample is used - -@group(0) @binding(0) var view_target: texture_2d; -@group(0) @binding(1) var history: texture_2d; -@group(0) @binding(2) var motion_vectors: texture_2d; -@group(0) @binding(3) var depth: texture_depth_2d; -@group(0) @binding(4) var nearest_sampler: sampler; -@group(0) @binding(5) var linear_sampler: sampler; - -struct Output { - @location(0) view_target: vec4, - @location(1) history: vec4, -}; - -// TAA is ideally applied after tonemapping, but before post processing -// Post processing wants to go before tonemapping, which conflicts -// Solution: Put TAA before tonemapping, tonemap TAA input, apply TAA, invert-tonemap TAA output -// https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 20 -// https://gpuopen.com/learn/optimized-reversible-tonemapper-for-resolve -fn rcp(x: f32) -> f32 { return 1.0 / x; } -fn max3(x: vec3) -> f32 { return max(x.r, max(x.g, x.b)); } -fn tonemap(color: vec3) -> vec3 { return color * rcp(max3(color) + 1.0); } -fn reverse_tonemap(color: vec3) -> vec3 { return color * rcp(1.0 - max3(color)); } - -// The following 3 functions are from Playdead (MIT-licensed) -// https://github.com/playdeadgames/temporal/blob/master/Assets/Shaders/TemporalReprojection.shader -fn RGB_to_YCoCg(rgb: vec3) -> vec3 { - let y = (rgb.r / 4.0) + (rgb.g / 2.0) + (rgb.b / 4.0); - let co = (rgb.r / 2.0) - (rgb.b / 2.0); - let cg = (-rgb.r / 4.0) + (rgb.g / 2.0) - (rgb.b / 4.0); - return vec3(y, co, cg); +// This vertex shader will create a triangle that will cover the entire screen +// with minimal effort, avoiding the need for a vertex buffer etc. 
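+// Illustrative note: for vertex_index 0, 1 and 2 the bit tricks below yield clip-space
+// positions (-1, -1), (3, -1) and (-1, 3), one oversized triangle whose interior covers the
+// whole [-1, 1] range, so no vertex buffer is needed.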
+@vertex +fn vs_main(@builtin(vertex_index) in_vertex_index: u32) -> @builtin(position) vec4 { + let x = f32((in_vertex_index & 1u) << 2u); + let y = f32((in_vertex_index & 2u) << 1u); + return vec4(x - 1.0, y - 1.0, 0.0, 1.0); } -fn YCoCg_to_RGB(ycocg: vec3) -> vec3 { - let r = ycocg.x + ycocg.y - ycocg.z; - let g = ycocg.x + ycocg.z; - let b = ycocg.x - ycocg.y - ycocg.z; - return saturate(vec3(r, g, b)); -} +@group(0) @binding(0) var t: texture_2d; -fn clip_towards_aabb_center(history_color: vec3, current_color: vec3, aabb_min: vec3, aabb_max: vec3) -> vec3 { - let p_clip = 0.5 * (aabb_max + aabb_min); - let e_clip = 0.5 * (aabb_max - aabb_min) + 0.00000001; - let v_clip = history_color - p_clip; - let v_unit = v_clip / e_clip; - let a_unit = abs(v_unit); - let ma_unit = max3(a_unit); - if ma_unit > 1.0 { - return p_clip + (v_clip / ma_unit); - } else { - return history_color; - } +@fragment +fn fs_main(@builtin(position) pos: vec4) -> @location(0) vec4 { + let coords = floor(pos.xy); + return textureLoad(t, vec2(coords), 0i); } -fn sample_history(u: f32, v: f32) -> vec3 { - return textureSample(history, linear_sampler, vec2(u, v)).rgb; -} +``` -fn sample_view_target(uv: vec2) -> vec3 { - var sample = textureSample(view_target, nearest_sampler, uv).rgb; -#ifdef TONEMAP - sample = tonemap(sample); -#endif - return RGB_to_YCoCg(sample); -} +### bevy_shaders/pbr_lighting -@fragment -fn taa(@location(0) uv: vec2) -> Output { - let texture_size = vec2(textureDimensions(view_target)); - let texel_size = 1.0 / texture_size; +```rust +#define_import_path bevy_pbr::lighting - // Fetch the current sample - let original_color = textureSample(view_target, nearest_sampler, uv); - var current_color = original_color.rgb; -#ifdef TONEMAP - current_color = tonemap(current_color); -#endif +#import bevy_pbr::{ + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, +} +#import bevy_render::maths::PI -#ifndef RESET - // Pick the closest motion_vector from 5 samples (reduces aliasing on the edges of moving entities) - // https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 27 - let offset = texel_size * 2.0; - let d_uv_tl = uv + vec2(-offset.x, offset.y); - let d_uv_tr = uv + vec2(offset.x, offset.y); - let d_uv_bl = uv + vec2(-offset.x, -offset.y); - let d_uv_br = uv + vec2(offset.x, -offset.y); - var closest_uv = uv; - let d_tl = textureSample(depth, nearest_sampler, d_uv_tl); - let d_tr = textureSample(depth, nearest_sampler, d_uv_tr); - var closest_depth = textureSample(depth, nearest_sampler, uv); - let d_bl = textureSample(depth, nearest_sampler, d_uv_bl); - let d_br = textureSample(depth, nearest_sampler, d_uv_br); - if d_tl > closest_depth { - closest_uv = d_uv_tl; - closest_depth = d_tl; - } - if d_tr > closest_depth { - closest_uv = d_uv_tr; - closest_depth = d_tr; - } - if d_bl > closest_depth { - closest_uv = d_uv_bl; - closest_depth = d_bl; - } - if d_br > closest_depth { - closest_uv = d_uv_br; - } - let closest_motion_vector = textureSample(motion_vectors, nearest_sampler, closest_uv).rg; +const LAYER_BASE: u32 = 0; +const LAYER_CLEARCOAT: u32 = 1; - // Reproject to find the equivalent sample from the past - // Uses 5-sample Catmull-Rom filtering (reduces blurriness) - // Catmull-Rom filtering: https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1 - // Ignoring corners: https://www.activision.com/cdn/research/Dynamic_Temporal_Antialiasing_and_Upsampling_in_Call_of_Duty_v4.pdf#page=68 - // 
Technically we should renormalize the weights since we're skipping the corners, but it's basically the same result - let history_uv = uv - closest_motion_vector; - let sample_position = history_uv * texture_size; - let texel_center = floor(sample_position - 0.5) + 0.5; - let f = sample_position - texel_center; - let w0 = f * (-0.5 + f * (1.0 - 0.5 * f)); - let w1 = 1.0 + f * f * (-2.5 + 1.5 * f); - let w2 = f * (0.5 + f * (2.0 - 1.5 * f)); - let w3 = f * f * (-0.5 + 0.5 * f); - let w12 = w1 + w2; - let texel_position_0 = (texel_center - 1.0) * texel_size; - let texel_position_3 = (texel_center + 2.0) * texel_size; - let texel_position_12 = (texel_center + (w2 / w12)) * texel_size; - var history_color = sample_history(texel_position_12.x, texel_position_0.y) * w12.x * w0.y; - history_color += sample_history(texel_position_0.x, texel_position_12.y) * w0.x * w12.y; - history_color += sample_history(texel_position_12.x, texel_position_12.y) * w12.x * w12.y; - history_color += sample_history(texel_position_3.x, texel_position_12.y) * w3.x * w12.y; - history_color += sample_history(texel_position_12.x, texel_position_3.y) * w12.x * w3.y; +// From the Filament design doc +// https://google.github.io/filament/Filament.html#table_symbols +// Symbol Definition +// v View unit vector +// l Incident light unit vector +// n Surface normal unit vector +// h Half unit vector between l and v +// f BRDF +// f_d Diffuse component of a BRDF +// f_r Specular component of a BRDF +// α Roughness, remapped from using input perceptualRoughness +// σ Diffuse reflectance +// Ω Spherical domain +// f0 Reflectance at normal incidence +// f90 Reflectance at grazing angle +// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) +// nior Index of refraction (IOR) of an interface +// ⟨n⋅l⟩ Dot product clamped to [0..1] +// ⟨a⟩ Saturated value (clamped to [0..1]) - // Constrain past sample with 3x3 YCoCg variance clipping (reduces ghosting) - // YCoCg: https://advances.realtimerendering.com/s2014/index.html#_HIGH-QUALITY_TEMPORAL_SUPERSAMPLING, slide 33 - // Variance clipping: https://developer.download.nvidia.com/gameworks/events/GDC2016/msalvi_temporal_supersampling.pdf - let s_tl = sample_view_target(uv + vec2(-texel_size.x, texel_size.y)); - let s_tm = sample_view_target(uv + vec2( 0.0, texel_size.y)); - let s_tr = sample_view_target(uv + vec2( texel_size.x, texel_size.y)); - let s_ml = sample_view_target(uv + vec2(-texel_size.x, 0.0)); - let s_mm = RGB_to_YCoCg(current_color); - let s_mr = sample_view_target(uv + vec2( texel_size.x, 0.0)); - let s_bl = sample_view_target(uv + vec2(-texel_size.x, -texel_size.y)); - let s_bm = sample_view_target(uv + vec2( 0.0, -texel_size.y)); - let s_br = sample_view_target(uv + vec2( texel_size.x, -texel_size.y)); - let moment_1 = s_tl + s_tm + s_tr + s_ml + s_mm + s_mr + s_bl + s_bm + s_br; - let moment_2 = (s_tl * s_tl) + (s_tm * s_tm) + (s_tr * s_tr) + (s_ml * s_ml) + (s_mm * s_mm) + (s_mr * s_mr) + (s_bl * s_bl) + (s_bm * s_bm) + (s_br * s_br); - let mean = moment_1 / 9.0; - let variance = (moment_2 / 9.0) - (mean * mean); - let std_deviation = sqrt(max(variance, vec3(0.0))); - history_color = RGB_to_YCoCg(history_color); - history_color = clip_towards_aabb_center(history_color, s_mm, mean - std_deviation, mean + std_deviation); - history_color = YCoCg_to_RGB(history_color); +// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material +// and consists of two components, the diffuse component (f_d) and the specular component 
(f_r): +// f(v,l) = f_d(v,l) + f_r(v,l) +// +// The form of the microfacet model is the same for diffuse and specular +// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm +// +// In which: +// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets +// G models the visibility (or occlusion or shadow-masking) of the microfacets +// f_m is the microfacet BRDF and differs between specular and diffuse components +// +// The above integration needs to be approximated. - // How confident we are that the history is representative of the current frame - var history_confidence = textureSample(history, nearest_sampler, uv).a; - let pixel_motion_vector = abs(closest_motion_vector) * texture_size; - if pixel_motion_vector.x < 0.01 && pixel_motion_vector.y < 0.01 { - // Increment when pixels are not moving - history_confidence += 10.0; - } else { - // Else reset - history_confidence = 1.0; - } +// Input to a lighting function for a single layer (either the base layer or the +// clearcoat layer). +struct LayerLightingInput { + // The normal vector. + N: vec3, + // The reflected vector. + R: vec3, + // The normal vector ⋅ the view vector. + NdotV: f32, - // Blend current and past sample - // Use more of the history if we're confident in it (reduces noise when there is no motion) - // https://hhoppe.com/supersample.pdf, section 4.1 - var current_color_factor = clamp(1.0 / history_confidence, MIN_HISTORY_BLEND_RATE, DEFAULT_HISTORY_BLEND_RATE); + // The perceptual roughness of the layer. + perceptual_roughness: f32, + // The roughness of the layer. + roughness: f32, +} - // Reject history when motion vectors point off screen - if any(saturate(history_uv) != history_uv) { - current_color_factor = 1.0; - history_confidence = 1.0; - } +// Input to a lighting function (`point_light`, `spot_light`, +// `directional_light`). +struct LightingInput { +#ifdef STANDARD_MATERIAL_CLEARCOAT + layers: array, +#else // STANDARD_MATERIAL_CLEARCOAT + layers: array, +#endif // STANDARD_MATERIAL_CLEARCOAT - current_color = mix(history_color, current_color, current_color_factor); -#endif // #ifndef RESET + // The world-space position. + P: vec3, + // The vector to the view. + V: vec3, + // The diffuse color of the material. + diffuse_color: vec3, - // Write output to history and view target - var out: Output; -#ifdef RESET - let history_confidence = 1.0 / MIN_HISTORY_BLEND_RATE; -#endif - out.history = vec4(current_color, history_confidence); -#ifdef TONEMAP - current_color = reverse_tonemap(current_color); -#endif - out.view_target = vec4(current_color, original_color.a); - return out; + // Specular reflectance at the normal incidence angle. + // + // This should be read F₀, but due to Naga limitations we can't name it that. + F0_: vec3, + // Constants for the BRDF approximation. + // + // See `EnvBRDFApprox` in + // . + // What we call `F_ab` they call `AB`. + F_ab: vec2, + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // The strength of the clearcoat layer. + clearcoat_strength: f32, +#endif // STANDARD_MATERIAL_CLEARCOAT + +#ifdef STANDARD_MATERIAL_ANISOTROPY + // The anisotropy strength, reflecting the amount of increased roughness in + // the tangent direction. + anisotropy: f32, + // The tangent direction for anisotropy: i.e. the direction in which + // roughness increases. + Ta: vec3, + // The bitangent direction, which is the cross product of the normal with + // the tangent direction. 
+ Ba: vec3, +#endif // STANDARD_MATERIAL_ANISOTROPY +} + +// Values derived from the `LightingInput` for both diffuse and specular lights. +struct DerivedLightingInput { + // The half-vector between L, the incident light vector, and V, the view + // vector. + H: vec3, + // The normal vector ⋅ the incident light vector. + NdotL: f32, + // The normal vector ⋅ the half-vector. + NdotH: f32, + // The incident light vector ⋅ the half-vector. + LdotH: f32, } -``` +// distanceAttenuation is simply the square falloff of light intensity +// combined with a smooth attenuation at the edge of the light radius +// +// light radius is a non-physical construct for efficiency purposes, +// because otherwise every light affects every fragment in the scene +fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { + let factor = distanceSquare * inverseRangeSquared; + let smoothFactor = saturate(1.0 - factor * factor); + let attenuation = smoothFactor * smoothFactor; + return attenuation * 1.0 / max(distanceSquare, 0.0001); +} -### crates/bevy_core_pipeline/src/tonemapping/tonemapping_shared +// Normal distribution function (specular D) +// Based on https://google.github.io/filament/Filament.html#citation-walter07 -```rust -#define_import_path bevy_core_pipeline::tonemapping +// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } -#import bevy_render::view::ColorGrading +// Simple implementation, has precision problems when using fp16 instead of fp32 +// see https://google.github.io/filament/Filament.html#listing_speculardfp16 +fn D_GGX(roughness: f32, NdotH: f32, h: vec3) -> f32 { + let oneMinusNdotHSquared = 1.0 - NdotH * NdotH; + let a = NdotH * roughness; + let k = roughness / (oneMinusNdotHSquared + a * a); + let d = k * k * (1.0 / PI); + return d; +} -// hack !! not sure what to do with this -#ifdef TONEMAPPING_PASS - @group(0) @binding(3) var dt_lut_texture: texture_3d; - @group(0) @binding(4) var dt_lut_sampler: sampler; -#else - @group(0) @binding(18) var dt_lut_texture: texture_3d; - @group(0) @binding(19) var dt_lut_sampler: sampler; -#endif +// An approximation of the anisotropic GGX distribution function. +// +// 1 +// D(𝐡) = ─────────────────────────────────────────────────── +// παₜα_b((𝐡 ⋅ 𝐭)² / αₜ²) + (𝐡 ⋅ 𝐛)² / α_b² + (𝐡 ⋅ 𝐧)²)² +// +// * `T` = 𝐭 = the tangent direction = the direction of increased roughness. +// +// * `B` = 𝐛 = the bitangent direction = the direction of decreased roughness. +// +// * `at` = αₜ = the alpha-roughness in the tangent direction. +// +// * `ab` = α_b = the alpha-roughness in the bitangent direction. 
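+//
+// Illustrative note: in `specular_anisotropy` below these are derived as
+// ab = roughness * roughness and at = mix(ab, 1.0, anisotropy * anisotropy), so for
+// anisotropy = 0.0 we get at = ab and the distribution reduces to the isotropic `D_GGX` above.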
+// +// This is from the `KHR_materials_anisotropy` spec: +// +fn D_GGX_anisotropic(at: f32, ab: f32, NdotH: f32, TdotH: f32, BdotH: f32) -> f32 { + let a2 = at * ab; + let f = vec3(ab * TdotH, at * BdotH, a2 * NdotH); + let w2 = a2 / dot(f, f); + let d = a2 * w2 * w2 * (1.0 / PI); + return d; +} -fn sample_current_lut(p: vec3) -> vec3 { - // Don't include code that will try to sample from LUTs if tonemap method doesn't require it - // Allows this file to be imported without necessarily needing the lut texture bindings -#ifdef TONEMAP_METHOD_AGX - return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; -#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE - return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; -#else ifdef TONEMAP_METHOD_BLENDER_FILMIC - return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; -#else - return vec3(1.0, 0.0, 1.0); - #endif +// Visibility function (Specular G) +// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } +// such that f_r becomes +// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) +// where +// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } +// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv +fn V_SmithGGXCorrelated(roughness: f32, NdotV: f32, NdotL: f32) -> f32 { + let a2 = roughness * roughness; + let lambdaV = NdotL * sqrt((NdotV - a2 * NdotV) * NdotV + a2); + let lambdaL = NdotV * sqrt((NdotL - a2 * NdotL) * NdotL + a2); + let v = 0.5 / (lambdaV + lambdaL); + return v; } -// -------------------------------------- -// --- SomewhatBoringDisplayTransform --- -// -------------------------------------- -// By Tomasz Stachowiak +// The visibility function, anisotropic variant. +fn V_GGX_anisotropic( + at: f32, + ab: f32, + NdotL: f32, + NdotV: f32, + BdotV: f32, + TdotV: f32, + TdotL: f32, + BdotL: f32, +) -> f32 { + let GGX_V = NdotL * length(vec3(at * TdotV, ab * BdotV, NdotV)); + let GGX_L = NdotV * length(vec3(at * TdotL, ab * BdotL, NdotL)); + let v = 0.5 / (GGX_V + GGX_L); + return saturate(v); +} -fn rgb_to_ycbcr(col: vec3) -> vec3 { - let m = mat3x3( - 0.2126, 0.7152, 0.0722, - -0.1146, -0.3854, 0.5, - 0.5, -0.4542, -0.0458 - ); - return col * m; +// A simpler, but nonphysical, alternative to Smith-GGX. We use this for +// clearcoat, per the Filament spec. +// +// https://google.github.io/filament/Filament.html#materialsystem/clearcoatmodel#toc4.9.1 +fn V_Kelemen(LdotH: f32) -> f32 { + return 0.25 / (LdotH * LdotH); } -fn ycbcr_to_rgb(col: vec3) -> vec3 { - let m = mat3x3( - 1.0, 0.0, 1.5748, - 1.0, -0.1873, -0.4681, - 1.0, 1.8556, 0.0 - ); - return max(vec3(0.0), col * m); +// Fresnel function +// see https://google.github.io/filament/Filament.html#citation-schlick94 +// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 +fn F_Schlick_vec(f0: vec3, f90: f32, VdotH: f32) -> vec3 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VdotH, 5.0); } -fn tonemap_curve(v: f32) -> f32 { -#ifdef 0 - // Large linear part in the lows, but compresses highs. 
- float c = v + v * v + 0.5 * v * v * v; - return c / (1.0 + c); -#else - return 1.0 - exp(-v); -#endif +fn F_Schlick(f0: f32, f90: f32, VdotH: f32) -> f32 { + // not using mix to keep the vec3 and float versions identical + return f0 + (f90 - f0) * pow(1.0 - VdotH, 5.0); } -fn tonemap_curve3_(v: vec3) -> vec3 { - return vec3(tonemap_curve(v.r), tonemap_curve(v.g), tonemap_curve(v.b)); +fn fresnel(f0: vec3, LdotH: f32) -> vec3 { + // f_90 suitable for ambient occlusion + // see https://google.github.io/filament/Filament.html#lighting/occlusion + let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); + return F_Schlick_vec(f0, f90, LdotH); } -fn somewhat_boring_display_transform(col: vec3) -> vec3 { - var boring_color = col; - let ycbcr = rgb_to_ycbcr(boring_color); +// Given distribution, visibility, and Fresnel term, calculates the final +// specular light. +// +// Multiscattering approximation: +// +fn specular_multiscatter( + input: ptr, + D: f32, + V: f32, + F: vec3, + specular_intensity: f32, +) -> vec3 { + // Unpack. + let F0 = (*input).F0_; + let F_ab = (*input).F_ab; - let bt = tonemap_curve(length(ycbcr.yz) * 2.4); - var desat = max((bt - 0.7) * 0.8, 0.0); - desat *= desat; + var Fr = (specular_intensity * D * V) * F; + Fr *= 1.0 + F0 * (1.0 / F_ab.x - 1.0); + return Fr; +} - let desat_col = mix(boring_color.rgb, ycbcr.xxx, desat); +// Specular BRDF +// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf - let tm_luma = tonemap_curve(ycbcr.x); - let tm0 = boring_color.rgb * max(0.0, tm_luma / max(1e-5, tonemapping_luminance(boring_color.rgb))); - let final_mult = 0.97; - let tm1 = tonemap_curve3_(desat_col); +// N, V, and L must all be normalized. +fn derive_lighting_input(N: vec3, V: vec3, L: vec3) -> DerivedLightingInput { + var input: DerivedLightingInput; + var H: vec3 = normalize(L + V); + input.H = H; + input.NdotL = saturate(dot(N, L)); + input.NdotH = saturate(dot(N, H)); + input.LdotH = saturate(dot(L, H)); + return input; +} - boring_color = mix(tm0, tm1, bt * bt); +// Returns L in the `xyz` components and the specular intensity in the `w` component. +fn compute_specular_layer_values_for_point_light( + input: ptr, + layer: u32, + V: vec3, + light_to_frag: vec3, + light_position_radius: f32, +) -> vec4 { + // Unpack. + let R = (*input).layers[layer].R; + let a = (*input).layers[layer].roughness; - return boring_color * final_mult; + // Representative Point Area Lights. + // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 + let centerToRay = dot(light_to_frag, R) * R - light_to_frag; + let closestPoint = light_to_frag + centerToRay * saturate( + light_position_radius * inverseSqrt(dot(centerToRay, centerToRay))); + let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); + let normalizationFactor = a / saturate(a + (light_position_radius * 0.5 * LspecLengthInverse)); + let intensity = normalizationFactor * normalizationFactor; + + let L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? + return vec4(L, intensity); +} + +// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m +// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } +fn specular( + input: ptr, + derived_input: ptr, + specular_intensity: f32, +) -> vec3 { + // Unpack. 
+ let roughness = (*input).layers[LAYER_BASE].roughness; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let F0 = (*input).F0_; + let H = (*derived_input).H; + let NdotL = (*derived_input).NdotL; + let NdotH = (*derived_input).NdotH; + let LdotH = (*derived_input).LdotH; + + // Calculate distribution. + let D = D_GGX(roughness, NdotH, H); + // Calculate visibility. + let V = V_SmithGGXCorrelated(roughness, NdotV, NdotL); + // Calculate the Fresnel term. + let F = fresnel(F0, LdotH); + + // Calculate the specular light. + let Fr = specular_multiscatter(input, D, V, F, specular_intensity); + return Fr; +} + +// Calculates the specular light for the clearcoat layer. Returns Fc, the +// Fresnel term, in the first channel, and Frc, the specular clearcoat light, in +// the second channel. +// +// +fn specular_clearcoat( + input: ptr, + derived_input: ptr, + clearcoat_strength: f32, + specular_intensity: f32, +) -> vec2 { + // Unpack. + let roughness = (*input).layers[LAYER_CLEARCOAT].roughness; + let H = (*derived_input).H; + let NdotH = (*derived_input).NdotH; + let LdotH = (*derived_input).LdotH; + + // Calculate distribution. + let Dc = D_GGX(roughness, NdotH, H); + // Calculate visibility. + let Vc = V_Kelemen(LdotH); + // Calculate the Fresnel term. + let Fc = F_Schlick(0.04, 1.0, LdotH) * clearcoat_strength; + // Calculate the specular light. + let Frc = (specular_intensity * Dc * Vc) * Fc; + return vec2(Fc, Frc); +} + +#ifdef STANDARD_MATERIAL_ANISOTROPY + +fn specular_anisotropy( + input: ptr, + derived_input: ptr, + L: vec3, + specular_intensity: f32, +) -> vec3 { + // Unpack. + let roughness = (*input).layers[LAYER_BASE].roughness; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let V = (*input).V; + let F0 = (*input).F0_; + let anisotropy = (*input).anisotropy; + let Ta = (*input).Ta; + let Ba = (*input).Ba; + let H = (*derived_input).H; + let NdotL = (*derived_input).NdotL; + let NdotH = (*derived_input).NdotH; + let LdotH = (*derived_input).LdotH; + + let TdotL = dot(Ta, L); + let BdotL = dot(Ba, L); + let TdotH = dot(Ta, H); + let BdotH = dot(Ba, H); + let TdotV = dot(Ta, V); + let BdotV = dot(Ba, V); + + let ab = roughness * roughness; + let at = mix(ab, 1.0, anisotropy * anisotropy); + + let Da = D_GGX_anisotropic(at, ab, NdotH, TdotH, BdotH); + let Va = V_GGX_anisotropic(at, ab, NdotL, NdotV, BdotV, TdotV, TdotL, BdotL); + let Fa = fresnel(F0, LdotH); + + // Calculate the specular light. + let Fr = specular_multiscatter(input, Da, Va, Fa, specular_intensity); + return Fr; +} + +#endif // STANDARD_MATERIAL_ANISOTROPY + +// Diffuse BRDF +// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf +// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm +// +// simplest approximation +// float Fd_Lambert() { +// return 1.0 / PI; +// } +// +// vec3 Fd = diffuseColor * Fd_Lambert(); +// +// Disney approximation +// See https://google.github.io/filament/Filament.html#citation-burley12 +// minimal quality difference +fn Fd_Burley( + input: ptr, + derived_input: ptr, +) -> f32 { + // Unpack. 
+ let roughness = (*input).layers[LAYER_BASE].roughness; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let NdotL = (*derived_input).NdotL; + let LdotH = (*derived_input).LdotH; + + let f90 = 0.5 + 2.0 * roughness * LdotH * LdotH; + let lightScatter = F_Schlick(1.0, f90, NdotL); + let viewScatter = F_Schlick(1.0, f90, NdotV); + return lightScatter * viewScatter * (1.0 / PI); +} + +// Scale/bias approximation +// https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile +// TODO: Use a LUT (more accurate) +fn F_AB(perceptual_roughness: f32, NdotV: f32) -> vec2 { + let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); + let c1 = vec4(1.0, 0.0425, 1.04, -0.04); + let r = perceptual_roughness * c0 + c1; + let a004 = min(r.x * r.x, exp2(-9.28 * NdotV)) * r.x + r.y; + return vec2(-1.04, 1.04) * a004 + r.zw; } -// ------------------------------------------ -// ------------- Tony McMapface ------------- -// ------------------------------------------ -// By Tomasz Stachowiak -// https://github.com/h3r2tic/tony-mc-mapface - -const TONY_MC_MAPFACE_LUT_DIMS: f32 = 48.0; +fn EnvBRDFApprox(F0: vec3, F_ab: vec2) -> vec3 { + return F0 * F_ab.x + F_ab.y; +} -fn sample_tony_mc_mapface_lut(stimulus: vec3) -> vec3 { - var uv = (stimulus / (stimulus + 1.0)) * (f32(TONY_MC_MAPFACE_LUT_DIMS - 1.0) / f32(TONY_MC_MAPFACE_LUT_DIMS)) + 0.5 / f32(TONY_MC_MAPFACE_LUT_DIMS); - return sample_current_lut(saturate(uv)).rgb; +fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { + // clamp perceptual roughness to prevent precision problems + // According to Filament design 0.089 is recommended for mobile + // Filament uses 0.045 for non-mobile + let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); + return clampedPerceptualRoughness * clampedPerceptualRoughness; } -// --------------------------------- -// ---------- ACES Fitted ---------- -// --------------------------------- +fn point_light(light_id: u32, input: ptr) -> vec3 { + // Unpack. + let diffuse_color = (*input).diffuse_color; + let P = (*input).P; + let N = (*input).layers[LAYER_BASE].N; + let V = (*input).V; + + let light = &view_bindings::clusterable_objects.data[light_id]; + let light_to_frag = (*light).position_radius.xyz - P; + let L = normalize(light_to_frag); + let distance_square = dot(light_to_frag, light_to_frag); + let rangeAttenuation = getDistanceAttenuation(distance_square, (*light).color_inverse_square_range.w); -// Same base implementation that Godot 4.0 uses for Tonemap ACES. + // Base layer -// https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl + let specular_L_intensity = compute_specular_layer_values_for_point_light( + input, + LAYER_BASE, + V, + light_to_frag, + (*light).position_radius.w, + ); + var specular_derived_input = derive_lighting_input(N, V, specular_L_intensity.xyz); -// The code in this file was originally written by Stephen Hill (@self_shadow), who deserves all -// credit for coming up with this fit and implementing it. Buy him a beer next time you see him. 
:) + let specular_intensity = specular_L_intensity.w; -fn RRTAndODTFit(v: vec3) -> vec3 { - let a = v * (v + 0.0245786) - 0.000090537; - let b = v * (0.983729 * v + 0.4329510) + 0.238081; - return a / b; -} +#ifdef STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular_anisotropy(input, &specular_derived_input, L, specular_intensity); +#else // STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular(input, &specular_derived_input, specular_intensity); +#endif // STANDARD_MATERIAL_ANISOTROPY -fn ACESFitted(color: vec3) -> vec3 { - var fitted_color = color; + // Clearcoat - // sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT - let rgb_to_rrt = mat3x3( - vec3(0.59719, 0.35458, 0.04823), - vec3(0.07600, 0.90834, 0.01566), - vec3(0.02840, 0.13383, 0.83777) +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Unpack. + let clearcoat_N = (*input).layers[LAYER_CLEARCOAT].N; + let clearcoat_strength = (*input).clearcoat_strength; + + // Perform specular input calculations again for the clearcoat layer. We + // can't reuse the above because the clearcoat normal might be different + // from the main layer normal. + let clearcoat_specular_L_intensity = compute_specular_layer_values_for_point_light( + input, + LAYER_CLEARCOAT, + V, + light_to_frag, + (*light).position_radius.w, ); - - // ODT_SAT => XYZ => D60_2_D65 => sRGB - let odt_to_rgb = mat3x3( - vec3(1.60475, -0.53108, -0.07367), - vec3(-0.10208, 1.10813, -0.00605), - vec3(-0.00327, -0.07276, 1.07602) + var clearcoat_specular_derived_input = + derive_lighting_input(clearcoat_N, V, clearcoat_specular_L_intensity.xyz); + + // Calculate the specular light. + let clearcoat_specular_intensity = clearcoat_specular_L_intensity.w; + let Fc_Frc = specular_clearcoat( + input, + &clearcoat_specular_derived_input, + clearcoat_strength, + clearcoat_specular_intensity ); + let inv_Fc = 1.0 - Fc_Frc.r; // Inverse Fresnel term. + let Frc = Fc_Frc.g; // Clearcoat light. +#endif // STANDARD_MATERIAL_CLEARCOAT - fitted_color *= rgb_to_rrt; - - // Apply RRT and ODT - fitted_color = RRTAndODTFit(fitted_color); + // Diffuse. + // Comes after specular since its N⋅L is used in the lighting equation. + var derived_input = derive_lighting_input(N, V, L); + let diffuse = diffuse_color * Fd_Burley(input, &derived_input); - fitted_color *= odt_to_rgb; + // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation + // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ + // where + // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color + // Φ is luminous power in lumens + // our rangeAttenuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius - // Clamp to [0, 1] - fitted_color = saturate(fitted_color); + // For a point light, luminous intensity, I, in lumens per steradian is given by: + // I = Φ / 4 π + // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower - return fitted_color; -} + // NOTE: (*light).color.rgb is premultiplied with (*light).intensity / 4 π (which would be the luminous intensity) on the CPU -// ------------------------------- -// ------------- AgX ------------- -// ------------------------------- -// By Troy Sobotka -// https://github.com/MrLixm/AgXc -// https://github.com/sobotka/AgX + var color: vec3; +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Account for the Fresnel term from the clearcoat darkening the main layer. 
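+    // Worked example (illustrative): with the clearcoat Fresnel above, Fc = 0.04 * clearcoat_strength
+    // when LdotH is near 1.0, so at full strength inv_Fc = 0.96; the base diffuse is then scaled
+    // by 0.96 and the base specular by 0.96 * 0.96 ≈ 0.92 before the clearcoat lobe Frc is added.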
+ // + // + color = (diffuse + specular_light * inv_Fc) * inv_Fc + Frc; +#else // STANDARD_MATERIAL_CLEARCOAT + color = diffuse + specular_light; +#endif // STANDARD_MATERIAL_CLEARCOAT -// pow() but safe for NaNs/negatives -fn powsafe(color: vec3, power: f32) -> vec3 { - return pow(abs(color), vec3(power)) * sign(color); + return color * (*light).color_inverse_square_range.rgb * + (rangeAttenuation * derived_input.NdotL); } -/* - Increase color saturation of the given color data. - :param color: expected sRGB primaries input - :param saturationAmount: expected 0-1 range with 1=neutral, 0=no saturation. - -- ref[2] [4] -*/ -fn saturation(color: vec3, saturationAmount: f32) -> vec3 { - let luma = tonemapping_luminance(color); - return mix(vec3(luma), color, vec3(saturationAmount)); -} +fn spot_light(light_id: u32, input: ptr) -> vec3 { + // reuse the point light calculations + let point_light = point_light(light_id, input); -/* - Output log domain encoded data. - Similar to OCIO lg2 AllocationTransform. - ref[0] -*/ -fn convertOpenDomainToNormalizedLog2_(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { - let in_midgray = 0.18; + let light = &view_bindings::clusterable_objects.data[light_id]; - // remove negative before log transform - var normalized_color = max(vec3(0.0), color); - // avoid infinite issue with log -- ref[1] - normalized_color = select(normalized_color, 0.00001525878 + normalized_color, normalized_color < vec3(0.00003051757)); - normalized_color = clamp( - log2(normalized_color / in_midgray), - vec3(minimum_ev), - vec3(maximum_ev) - ); - let total_exposure = maximum_ev - minimum_ev; + // reconstruct spot dir from x/z and y-direction flag + var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); + spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); + if ((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { + spot_dir.y = -spot_dir.y; + } + let light_to_frag = (*light).position_radius.xyz - (*input).P.xyz; - return (normalized_color - minimum_ev) / total_exposure; -} + // calculate attenuation based on filament formula https://google.github.io/filament/Filament.html#listing_glslpunctuallight + // spot_scale and spot_offset have been precomputed + // note we normalize here to get "l" from the filament listing. spot_dir is already normalized + let cd = dot(-spot_dir, normalize(light_to_frag)); + let attenuation = saturate(cd * (*light).light_custom_data.z + (*light).light_custom_data.w); + let spot_attenuation = attenuation * attenuation; -// Inverse of above -fn convertNormalizedLog2ToOpenDomain(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { - var open_color = color; - let in_midgray = 0.18; - let total_exposure = maximum_ev - minimum_ev; + return point_light * spot_attenuation; +} - open_color = (open_color * total_exposure) + minimum_ev; - open_color = pow(vec3(2.0), open_color); - open_color = open_color * in_midgray; +fn directional_light(light_id: u32, input: ptr) -> vec3 { + // Unpack. 
+ let diffuse_color = (*input).diffuse_color; + let NdotV = (*input).layers[LAYER_BASE].NdotV; + let N = (*input).layers[LAYER_BASE].N; + let V = (*input).V; + let roughness = (*input).layers[LAYER_BASE].roughness; - return open_color; -} + let light = &view_bindings::lights.directional_lights[light_id]; + let L = (*light).direction_to_light.xyz; + var derived_input = derive_lighting_input(N, V, L); -/*================= - Main processes -=================*/ + let diffuse = diffuse_color * Fd_Burley(input, &derived_input); -// Prepare the data for display encoding. Converted to log domain. -fn applyAgXLog(Image: vec3) -> vec3 { - var prepared_image = max(vec3(0.0), Image); // clamp negatives - let r = dot(prepared_image, vec3(0.84247906, 0.0784336, 0.07922375)); - let g = dot(prepared_image, vec3(0.04232824, 0.87846864, 0.07916613)); - let b = dot(prepared_image, vec3(0.04237565, 0.0784336, 0.87914297)); - prepared_image = vec3(r, g, b); +#ifdef STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular_anisotropy(input, &derived_input, L, 1.0); +#else // STANDARD_MATERIAL_ANISOTROPY + let specular_light = specular(input, &derived_input, 1.0); +#endif // STANDARD_MATERIAL_ANISOTROPY - prepared_image = convertOpenDomainToNormalizedLog2_(prepared_image, -10.0, 6.5); - - prepared_image = clamp(prepared_image, vec3(0.0), vec3(1.0)); - return prepared_image; -} +#ifdef STANDARD_MATERIAL_CLEARCOAT + let clearcoat_N = (*input).layers[LAYER_CLEARCOAT].N; + let clearcoat_strength = (*input).clearcoat_strength; -fn applyLUT3D(Image: vec3, block_size: f32) -> vec3 { - return sample_current_lut(Image * ((block_size - 1.0) / block_size) + 0.5 / block_size).rgb; -} + // Perform specular input calculations again for the clearcoat layer. We + // can't reuse the above because the clearcoat normal might be different + // from the main layer normal. + var derived_clearcoat_input = derive_lighting_input(clearcoat_N, V, L); -// ------------------------- -// ------------------------- -// ------------------------- + let Fc_Frc = + specular_clearcoat(input, &derived_clearcoat_input, clearcoat_strength, 1.0); + let inv_Fc = 1.0 - Fc_Frc.r; + let Frc = Fc_Frc.g; +#endif // STANDARD_MATERIAL_CLEARCOAT -fn sample_blender_filmic_lut(stimulus: vec3) -> vec3 { - let block_size = 64.0; - let normalized = saturate(convertOpenDomainToNormalizedLog2_(stimulus, -11.0, 12.0)); - return applyLUT3D(normalized, block_size); -} + var color: vec3; +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Account for the Fresnel term from the clearcoat darkening the main layer. + // + // + color = (diffuse + specular_light * inv_Fc) * inv_Fc * derived_input.NdotL + + Frc * derived_clearcoat_input.NdotL; +#else // STANDARD_MATERIAL_CLEARCOAT + color = (diffuse + specular_light) * derived_input.NdotL; +#endif // STANDARD_MATERIAL_CLEARCOAT -// from https://64.github.io/tonemapping/ -// reinhard on RGB oversaturates colors -fn tonemapping_reinhard(color: vec3) -> vec3 { - return color / (1.0 + color); + return color * (*light).color.rgb; } -fn tonemapping_reinhard_extended(color: vec3, max_white: f32) -> vec3 { - let numerator = color * (1.0 + (color / vec3(max_white * max_white))); - return numerator / (1.0 + color); -} +``` -// luminance coefficients from Rec. 709. 
-// https://en.wikipedia.org/wiki/Rec._709 -fn tonemapping_luminance(v: vec3) -> f32 { - return dot(v, vec3(0.2126, 0.7152, 0.0722)); -} +### bevy_shaders/pbr -fn tonemapping_change_luminance(c_in: vec3, l_out: f32) -> vec3 { - let l_in = tonemapping_luminance(c_in); - return c_in * (l_out / l_in); +```rust +#import bevy_pbr::{ + pbr_functions::alpha_discard, + pbr_fragment::pbr_input_from_standard_material, } -fn tonemapping_reinhard_luminance(color: vec3) -> vec3 { - let l_old = tonemapping_luminance(color); - let l_new = l_old / (1.0 + l_old); - return tonemapping_change_luminance(color, l_new); +#ifdef PREPASS_PIPELINE +#import bevy_pbr::{ + prepass_io::{VertexOutput, FragmentOutput}, + pbr_deferred_functions::deferred_output, } - -fn rgb_to_srgb_simple(color: vec3) -> vec3 { - return pow(color, vec3(1.0 / 2.2)); +#else +#import bevy_pbr::{ + forward_io::{VertexOutput, FragmentOutput}, + pbr_functions, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, + pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, } +#endif -// Source: Advanced VR Rendering, GDC 2015, Alex Vlachos, Valve, Slide 49 -// https://media.steampowered.com/apps/valve/2015/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf -fn screen_space_dither(frag_coord: vec2) -> vec3 { - var dither = vec3(dot(vec2(171.0, 231.0), frag_coord)).xxx; - dither = fract(dither.rgb / vec3(103.0, 71.0, 97.0)); - return (dither - 0.5) / 255.0; -} +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output +#endif -fn tone_mapping(in: vec4, color_grading: ColorGrading) -> vec4 { - var color = max(in.rgb, vec3(0.0)); +@fragment +fn fragment( +#ifdef MESHLET_MESH_MATERIAL_PASS + @builtin(position) frag_coord: vec4, +#else + in: VertexOutput, + @builtin(front_facing) is_front: bool, +#endif +) -> FragmentOutput { +#ifdef MESHLET_MESH_MATERIAL_PASS + let in = resolve_vertex_output(frag_coord); + let is_front = true; +#endif - // Possible future grading: + // If we're in the crossfade section of a visibility range, conditionally + // discard the fragment according to the visibility pattern. +#ifdef VISIBILITY_RANGE_DITHER + pbr_functions::visibility_range_dither(in.position, in.visibility_range_dither); +#endif - // highlight gain gamma: 0.. - // let luma = powsafe(vec3(tonemapping_luminance(color)), 1.0); + // generate a PbrInput struct from the StandardMaterial bindings + var pbr_input = pbr_input_from_standard_material(in, is_front); - // highlight gain: 0.. - // color += color * luma.xxx * 1.0; + // alpha discard + pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); - // Linear pre tonemapping grading - color = saturation(color, color_grading.pre_saturation); - color = powsafe(color, color_grading.gamma); - color = color * powsafe(vec3(2.0), color_grading.exposure); - color = max(color, vec3(0.0)); +#ifdef PREPASS_PIPELINE + // write the gbuffer, lighting pass id, and optionally normal and motion_vector textures + let out = deferred_output(in, pbr_input); +#else + // in forward mode, we calculate the lit color immediately, and then apply some post-lighting effects here. 
+ // in deferred mode the lit color and these effects will be calculated in the deferred lighting shader + var out: FragmentOutput; + if (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + out.color = apply_pbr_lighting(pbr_input); + } else { + out.color = pbr_input.material.base_color; + } - // tone_mapping -#ifdef TONEMAP_METHOD_NONE - color = color; -#else ifdef TONEMAP_METHOD_REINHARD - color = tonemapping_reinhard(color.rgb); -#else ifdef TONEMAP_METHOD_REINHARD_LUMINANCE - color = tonemapping_reinhard_luminance(color.rgb); -#else ifdef TONEMAP_METHOD_ACES_FITTED - color = ACESFitted(color.rgb); -#else ifdef TONEMAP_METHOD_AGX - color = applyAgXLog(color); - color = applyLUT3D(color, 32.0); -#else ifdef TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM - color = somewhat_boring_display_transform(color.rgb); -#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE - color = sample_tony_mc_mapface_lut(color); -#else ifdef TONEMAP_METHOD_BLENDER_FILMIC - color = sample_blender_filmic_lut(color.rgb); + // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) + // note this does not include fullscreen postprocessing effects like bloom. + out.color = main_pass_post_lighting_processing(pbr_input, out.color); #endif - // Perceptual post tonemapping grading - color = saturation(color, color_grading.post_saturation); - - return vec4(color, in.a); -} - -// This is an **incredibly crude** approximation of the inverse of the tone mapping function. -// We assume here that there's a simple linear relationship between the input and output -// which is not true at all, but useful to at least preserve the overall luminance of colors -// when sampling from an already tonemapped image. (e.g. for transmissive materials when HDR is off) -fn approximate_inverse_tone_mapping(in: vec4, color_grading: ColorGrading) -> vec4 { - let out = tone_mapping(in, color_grading); - let approximate_ratio = length(in.rgb) / length(out.rgb); - return vec4(in.rgb * approximate_ratio, in.a); + return out; } ``` -### crates/bevy_core_pipeline/src/tonemapping/tonemapping +### bevy_shaders/skybox ```rust -#define TONEMAPPING_PASS - #import bevy_render::view::View -#import bevy_core_pipeline::{ - fullscreen_vertex_shader::FullscreenVertexOutput, - tonemapping::{tone_mapping, powsafe, screen_space_dither}, -} - -@group(0) @binding(0) var view: View; - -@group(0) @binding(1) var hdr_texture: texture_2d; -@group(0) @binding(2) var hdr_sampler: sampler; -@group(0) @binding(3) var dt_lut_texture: texture_3d; -@group(0) @binding(4) var dt_lut_sampler: sampler; - -@fragment -fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { - let hdr_color = textureSample(hdr_texture, hdr_sampler, in.uv); - - var output_rgb = tone_mapping(hdr_color, view.color_grading).rgb; +#import bevy_pbr::utils::coords_to_viewport_uv -#ifdef DEBAND_DITHER - output_rgb = powsafe(output_rgb.rgb, 1.0 / 2.2); - output_rgb = output_rgb + screen_space_dither(in.position.xy); - // This conversion back to linear space is required because our output texture format is - // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. 
- output_rgb = powsafe(output_rgb.rgb, 2.2); +struct SkyboxUniforms { + brightness: f32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + _wasm_padding_8b: u32, + _wasm_padding_12b: u32, + _wasm_padding_16b: u32, #endif - - return vec4(output_rgb, hdr_color.a); } -``` +@group(0) @binding(0) var skybox: texture_cube; +@group(0) @binding(1) var skybox_sampler: sampler; +@group(0) @binding(2) var view: View; +@group(0) @binding(3) var uniforms: SkyboxUniforms; -### crates/bevy_core_pipeline/src/fullscreen_vertex_shader/fullscreen +fn coords_to_ray_direction(position: vec2, viewport: vec4) -> vec3 { + // Using world positions of the fragment and camera to calculate a ray direction + // breaks down at large translations. This code only needs to know the ray direction. + // The ray direction is along the direction from the camera to the fragment position. + // In view space, the camera is at the origin, so the view space ray direction is + // along the direction of the fragment position - (0,0,0) which is just the + // fragment position. + // Use the position on the near clipping plane to avoid -inf world position + // because the far plane of an infinite reverse projection is at infinity. + let view_position_homogeneous = view.view_from_clip * vec4( + coords_to_viewport_uv(position, viewport) * vec2(2.0, -2.0) + vec2(-1.0, 1.0), + 1.0, + 1.0, + ); + let view_ray_direction = view_position_homogeneous.xyz / view_position_homogeneous.w; + // Transforming the view space ray direction by the view matrix, transforms the + // direction to world space. Note that the w element is set to 0.0, as this is a + // vector direction, not a position, That causes the matrix multiplication to ignore + // the translations from the view matrix. + let ray_direction = (view.world_from_view * vec4(view_ray_direction, 0.0)).xyz; -```rust -#define_import_path bevy_core_pipeline::fullscreen_vertex_shader + return normalize(ray_direction); +} -struct FullscreenVertexOutput { - @builtin(position) - position: vec4, - @location(0) - uv: vec2, +struct VertexOutput { + @builtin(position) position: vec4, }; -// This vertex shader produces the following, when drawn using indices 0..3: -// -// 1 | 0-----x.....2 -// 0 | | s | . ´ -// -1 | x_____x´ -// -2 | : .´ -// -3 | 1´ +// 3 | 2. +// 2 | : `. +// 1 | x-----x. +// 0 | | s | `. +// -1 | 0-----x.....1 // +--------------- // -1 0 1 2 3 // // The axes are clip-space x and y. The region marked s is the visible region. // The digits in the corners of the right-angled triangle are the vertex // indices. -// -// The top-left has UV 0,0, the bottom-left has 0,2, and the top-right has 2,0. -// This means that the UV gets interpolated to 1,1 at the bottom-right corner -// of the clip-space rectangle that is at 1,-1 in clip space. @vertex -fn fullscreen_vertex_shader(@builtin(vertex_index) vertex_index: u32) -> FullscreenVertexOutput { - // See the explanation above for how this works - let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; - let clip_position = vec4(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0), 0.0, 1.0); +fn skybox_vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { + // See the explanation above for how this works. 
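+    // Worked through for the three vertex indices (a rough check of the
+    // single-triangle trick): index 0 -> (0,0) -> clip (-1,-1),
+    // index 1 -> (1,0) -> clip (3,-1), index 2 -> (0,1) -> clip (-1,3).
+    // z = 0.25 * 4 - 1 = 0 and w = 0.5 * 4 - 1 = 1, so the triangle covers
+    // the whole screen at depth 0.0 (the far plane under a reverse-z
+    // convention), keeping the skybox behind all other geometry.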
+ let clip_position = vec4( + f32(vertex_index & 1u), + f32((vertex_index >> 1u) & 1u), + 0.25, + 0.5 + ) * 4.0 - vec4(1.0); - return FullscreenVertexOutput(clip_position, uv); + return VertexOutput(clip_position); +} + +@fragment +fn skybox_fragment(in: VertexOutput) -> @location(0) vec4 { + let ray_direction = coords_to_ray_direction(in.position.xy, view.viewport); + + // Cube maps are left-handed so we negate the z coordinate. + let out = textureSample(skybox, skybox_sampler, ray_direction * vec3(1.0, 1.0, -1.0)); + return vec4(out.rgb * uniforms.brightness, out.a); } ``` -### crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening +### bevy_shaders/tonemapping_shared ```rust -// Copyright (c) 2022 Advanced Micro Devices, Inc. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +#define_import_path bevy_core_pipeline::tonemapping -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_render::{ + view::ColorGrading, + color_operations::{hsv_to_rgb, rgb_to_hsv}, + maths::{PI_2, powsafe}, +} -struct CASUniforms { - sharpness: f32, -}; +#import bevy_core_pipeline::tonemapping_lut_bindings::{ + dt_lut_texture, + dt_lut_sampler, +} -@group(0) @binding(0) var screenTexture: texture_2d; -@group(0) @binding(1) var samp: sampler; -@group(0) @binding(2) var uniforms: CASUniforms; +// Half the size of the crossfade region between shadows and midtones and +// between midtones and highlights. This value, 0.1, corresponds to 10% of the +// gamut on either side of the cutoff point. +const LEVEL_MARGIN: f32 = 0.1; -// This is set at the limit of providing unnatural results for sharpening. -const FSR_RCAS_LIMIT = 0.1875; -// -4.0 instead of -1.0 to avoid issues with MSAA. -const peakC = vec2(10.0, -40.0); +// The inverse reciprocal of twice the above, used when scaling the midtone +// region. +const LEVEL_MARGIN_DIV: f32 = 0.5 / LEVEL_MARGIN; -// Robust Contrast Adaptive Sharpening (RCAS) -// Based on the following implementation: -// https://github.com/GPUOpen-Effects/FidelityFX-FSR2/blob/ea97a113b0f9cadf519fbcff315cc539915a3acd/src/ffx-fsr2-api/shaders/ffx_fsr1.h#L672 -// RCAS is based on the following logic. -// RCAS uses a 5 tap filter in a cross pattern (same as CAS), -// W b -// W 1 W for taps d e f -// W h -// Where 'W' is the negative lobe weight. 
-// output = (W*(b+d+f+h)+e)/(4*W+1) -// RCAS solves for 'W' by seeing where the signal might clip out of the {0 to 1} input range, -// 0 == (W*(b+d+f+h)+e)/(4*W+1) -> W = -e/(b+d+f+h) -// 1 == (W*(b+d+f+h)+e)/(4*W+1) -> W = (1-e)/(b+d+f+h-4) -// Then chooses the 'W' which results in no clipping, limits 'W', and multiplies by the 'sharp' amount. -// This solution above has issues with MSAA input as the steps along the gradient cause edge detection issues. -// So RCAS uses 4x the maximum and 4x the minimum (depending on equation)in place of the individual taps. -// As well as switching from 'e' to either the minimum or maximum (depending on side), to help in energy conservation. -// This stabilizes RCAS. -// RCAS does a simple highpass which is normalized against the local contrast then shaped, -// 0.25 -// 0.25 -1 0.25 -// 0.25 -// This is used as a noise detection filter, to reduce the effect of RCAS on grain, and focus on real edges. -// The CAS node runs after tonemapping, so the input will be in the range of 0 to 1. -@fragment -fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { - // Algorithm uses minimal 3x3 pixel neighborhood. - // b - // d e f - // h - let b = textureSample(screenTexture, samp, in.uv, vec2(0, -1)).rgb; - let d = textureSample(screenTexture, samp, in.uv, vec2(-1, 0)).rgb; - // We need the alpha value of the pixel we're working on for the output - let e = textureSample(screenTexture, samp, in.uv).rgbw; - let f = textureSample(screenTexture, samp, in.uv, vec2(1, 0)).rgb; - let h = textureSample(screenTexture, samp, in.uv, vec2(0, 1)).rgb; - // Min and max of ring. - let mn4 = min(min(b, d), min(f, h)); - let mx4 = max(max(b, d), max(f, h)); - // Limiters - // 4.0 to avoid issues with MSAA. - let hitMin = mn4 / (4.0 * mx4); - let hitMax = (peakC.x - mx4) / (peakC.y + 4.0 * mn4); - let lobeRGB = max(-hitMin, hitMax); - var lobe = max(-FSR_RCAS_LIMIT, min(0.0, max(lobeRGB.r, max(lobeRGB.g, lobeRGB.b)))) * uniforms.sharpness; -#ifdef RCAS_DENOISE - // Luma times 2. - let bL = b.b * 0.5 + (b.r * 0.5 + b.g); - let dL = d.b * 0.5 + (d.r * 0.5 + d.g); - let eL = e.b * 0.5 + (e.r * 0.5 + e.g); - let fL = f.b * 0.5 + (f.r * 0.5 + f.g); - let hL = h.b * 0.5 + (h.r * 0.5 + h.g); - // Noise detection. - var noise = 0.25 * bL + 0.25 * dL + 0.25 * fL + 0.25 * hL - eL;; - noise = saturate(abs(noise) / (max(max(bL, dL), max(fL, hL)) - min(min(bL, dL), min(fL, hL)))); - noise = 1.0 - 0.5 * noise; - // Apply noise removal. 
- lobe *= noise; +fn sample_current_lut(p: vec3) -> vec3 { + // Don't include code that will try to sample from LUTs if tonemap method doesn't require it + // Allows this file to be imported without necessarily needing the lut texture bindings +#ifdef TONEMAP_METHOD_AGX + return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE + return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else ifdef TONEMAP_METHOD_BLENDER_FILMIC + return textureSampleLevel(dt_lut_texture, dt_lut_sampler, p, 0.0).rgb; +#else + return vec3(1.0, 0.0, 1.0); + #endif +} + +// -------------------------------------- +// --- SomewhatBoringDisplayTransform --- +// -------------------------------------- +// By Tomasz Stachowiak + +fn rgb_to_ycbcr(col: vec3) -> vec3 { + let m = mat3x3( + 0.2126, 0.7152, 0.0722, + -0.1146, -0.3854, 0.5, + 0.5, -0.4542, -0.0458 + ); + return col * m; +} + +fn ycbcr_to_rgb(col: vec3) -> vec3 { + let m = mat3x3( + 1.0, 0.0, 1.5748, + 1.0, -0.1873, -0.4681, + 1.0, 1.8556, 0.0 + ); + return max(vec3(0.0), col * m); +} + +fn tonemap_curve(v: f32) -> f32 { +#ifdef 0 + // Large linear part in the lows, but compresses highs. + float c = v + v * v + 0.5 * v * v * v; + return c / (1.0 + c); +#else + return 1.0 - exp(-v); #endif - return vec4((lobe * b + lobe * d + lobe * f + lobe * h + e.rgb) / (4.0 * lobe + 1.0), e.w); } -``` +fn tonemap_curve3_(v: vec3) -> vec3 { + return vec3(tonemap_curve(v.r), tonemap_curve(v.g), tonemap_curve(v.b)); +} -### crates/bevy_core_pipeline/src/deferred/copy_deferred_lighting_id +fn somewhat_boring_display_transform(col: vec3) -> vec3 { + var boring_color = col; + let ycbcr = rgb_to_ycbcr(boring_color); -```rust -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + let bt = tonemap_curve(length(ycbcr.yz) * 2.4); + var desat = max((bt - 0.7) * 0.8, 0.0); + desat *= desat; -@group(0) @binding(0) -var material_id_texture: texture_2d; + let desat_col = mix(boring_color.rgb, ycbcr.xxx, desat); -struct FragmentOutput { - @builtin(frag_depth) frag_depth: f32, + let tm_luma = tonemap_curve(ycbcr.x); + let tm0 = boring_color.rgb * max(0.0, tm_luma / max(1e-5, tonemapping_luminance(boring_color.rgb))); + let final_mult = 0.97; + let tm1 = tonemap_curve3_(desat_col); -} + boring_color = mix(tm0, tm1, bt * bt); -@fragment -fn fragment(in: FullscreenVertexOutput) -> FragmentOutput { - var out: FragmentOutput; - // Depth is stored as unorm, so we are dividing the u8 by 255.0 here. 
- out.frag_depth = f32(textureLoad(material_id_texture, vec2(in.position.xy), 0).x) / 255.0; - return out; + return boring_color * final_mult; } +// ------------------------------------------ +// ------------- Tony McMapface ------------- +// ------------------------------------------ +// By Tomasz Stachowiak +// https://github.com/h3r2tic/tony-mc-mapface -``` +const TONY_MC_MAPFACE_LUT_DIMS: f32 = 48.0; -### crates/bevy_core_pipeline/src/blit/blit +fn sample_tony_mc_mapface_lut(stimulus: vec3) -> vec3 { + var uv = (stimulus / (stimulus + 1.0)) * (f32(TONY_MC_MAPFACE_LUT_DIMS - 1.0) / f32(TONY_MC_MAPFACE_LUT_DIMS)) + 0.5 / f32(TONY_MC_MAPFACE_LUT_DIMS); + return sample_current_lut(saturate(uv)).rgb; +} -```rust -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +// --------------------------------- +// ---------- ACES Fitted ---------- +// --------------------------------- -@group(0) @binding(0) var in_texture: texture_2d; -@group(0) @binding(1) var in_sampler: sampler; +// Same base implementation that Godot 4.0 uses for Tonemap ACES. -@fragment -fn fs_main(in: FullscreenVertexOutput) -> @location(0) vec4 { - return textureSample(in_texture, in_sampler, in.uv); +// https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl + +// The code in this file was originally written by Stephen Hill (@self_shadow), who deserves all +// credit for coming up with this fit and implementing it. Buy him a beer next time you see him. :) + +fn RRTAndODTFit(v: vec3) -> vec3 { + let a = v * (v + 0.0245786) - 0.000090537; + let b = v * (0.983729 * v + 0.4329510) + 0.238081; + return a / b; } -``` +fn ACESFitted(color: vec3) -> vec3 { + var fitted_color = color; -### crates/bevy_render/src/globals + // sRGB => XYZ => D65_2_D60 => AP1 => RRT_SAT + let rgb_to_rrt = mat3x3( + vec3(0.59719, 0.35458, 0.04823), + vec3(0.07600, 0.90834, 0.01566), + vec3(0.02840, 0.13383, 0.83777) + ); -```rust -#define_import_path bevy_render::globals + // ODT_SAT => XYZ => D60_2_D65 => sRGB + let odt_to_rgb = mat3x3( + vec3(1.60475, -0.53108, -0.07367), + vec3(-0.10208, 1.10813, -0.00605), + vec3(-0.00327, -0.07276, 1.07602) + ); -struct Globals { - // The time since startup in seconds - // Wraps to 0 after 1 hour. - time: f32, - // The delta time since the previous frame in seconds - delta_time: f32, - // Frame count since the start of the app. - // It wraps to zero when it reaches the maximum value of a u32. - frame_count: u32, -#ifdef SIXTEEN_BYTE_ALIGNMENT - // WebGL2 structs must be 16 byte aligned. 
- _webgl2_padding: f32 -#endif -}; + fitted_color *= rgb_to_rrt; -``` + // Apply RRT and ODT + fitted_color = RRTAndODTFit(fitted_color); -### crates/bevy_render/src/maths + fitted_color *= odt_to_rgb; -```rust -#define_import_path bevy_render::maths + // Clamp to [0, 1] + fitted_color = saturate(fitted_color); -fn affine2_to_square(affine: mat3x2) -> mat3x3 { - return mat3x3( - vec3(affine[0].xy, 0.0), - vec3(affine[1].xy, 0.0), - vec3(affine[2].xy, 1.0), - ); + return fitted_color; } -fn affine3_to_square(affine: mat3x4) -> mat4x4 { - return transpose(mat4x4( - affine[0], - affine[1], - affine[2], - vec4(0.0, 0.0, 0.0, 1.0), - )); -} +// ------------------------------- +// ------------- AgX ------------- +// ------------------------------- +// By Troy Sobotka +// https://github.com/MrLixm/AgXc +// https://github.com/sobotka/AgX -fn mat2x4_f32_to_mat3x3_unpack( - a: mat2x4, - b: f32, -) -> mat3x3 { - return mat3x3( - a[0].xyz, - vec3(a[0].w, a[1].xy), - vec3(a[1].zw, b), - ); +/* + Increase color saturation of the given color data. + :param color: expected sRGB primaries input + :param saturationAmount: expected 0-1 range with 1=neutral, 0=no saturation. + -- ref[2] [4] +*/ +fn saturation(color: vec3, saturationAmount: f32) -> vec3 { + let luma = tonemapping_luminance(color); + return mix(vec3(luma), color, vec3(saturationAmount)); } -// Extracts the square portion of an affine matrix: i.e. discards the -// translation. -fn affine3_to_mat3x3(affine: mat4x3) -> mat3x3 { - return mat3x3(affine[0].xyz, affine[1].xyz, affine[2].xyz); -} +/* + Output log domain encoded data. + Similar to OCIO lg2 AllocationTransform. + ref[0] +*/ +fn convertOpenDomainToNormalizedLog2_(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { + let in_midgray = 0.18; -// Returns the inverse of a 3x3 matrix. -fn inverse_mat3x3(matrix: mat3x3) -> mat3x3 { - let tmp0 = cross(matrix[1], matrix[2]); - let tmp1 = cross(matrix[2], matrix[0]); - let tmp2 = cross(matrix[0], matrix[1]); - let inv_det = 1.0 / dot(matrix[2], tmp2); - return transpose(mat3x3(tmp0 * inv_det, tmp1 * inv_det, tmp2 * inv_det)); -} + // remove negative before log transform + var normalized_color = max(vec3(0.0), color); + // avoid infinite issue with log -- ref[1] + normalized_color = select(normalized_color, 0.00001525878 + normalized_color, normalized_color < vec3(0.00003051757)); + normalized_color = clamp( + log2(normalized_color / in_midgray), + vec3(minimum_ev), + vec3(maximum_ev) + ); + let total_exposure = maximum_ev - minimum_ev; -// Returns the inverse of an affine matrix. -// -// https://en.wikipedia.org/wiki/Affine_transformation#Groups -fn inverse_affine3(affine: mat4x3) -> mat4x3 { - let matrix3 = affine3_to_mat3x3(affine); - let inv_matrix3 = inverse_mat3x3(matrix3); - return mat4x3(inv_matrix3[0], inv_matrix3[1], inv_matrix3[2], -(inv_matrix3 * affine[3])); + return (normalized_color - minimum_ev) / total_exposure; } -// Creates an orthonormal basis given a Z vector and an up vector (which becomes -// Y after orthonormalization). -// -// The results are equivalent to the Gram-Schmidt process [1]. 
-// -// [1]: https://math.stackexchange.com/a/1849294 -fn orthonormalize(z_unnormalized: vec3, up: vec3) -> mat3x3 { - let z_basis = normalize(z_unnormalized); - let x_basis = normalize(cross(z_basis, up)); - let y_basis = cross(z_basis, x_basis); - return mat3x3(x_basis, y_basis, z_basis); -} +// Inverse of above +fn convertNormalizedLog2ToOpenDomain(color: vec3, minimum_ev: f32, maximum_ev: f32) -> vec3 { + var open_color = color; + let in_midgray = 0.18; + let total_exposure = maximum_ev - minimum_ev; -``` + open_color = (open_color * total_exposure) + minimum_ev; + open_color = pow(vec3(2.0), open_color); + open_color = open_color * in_midgray; -### crates/bevy_render/src/view/view + return open_color; +} -```rust -#define_import_path bevy_render::view -struct ColorGrading { - exposure: f32, - gamma: f32, - pre_saturation: f32, - post_saturation: f32, -} +/*================= + Main processes +=================*/ -struct View { - view_proj: mat4x4, - unjittered_view_proj: mat4x4, - inverse_view_proj: mat4x4, - view: mat4x4, - inverse_view: mat4x4, - projection: mat4x4, - inverse_projection: mat4x4, - world_position: vec3, - exposure: f32, - // viewport(x_origin, y_origin, width, height) - viewport: vec4, - frustum: array, 6>, - color_grading: ColorGrading, - mip_bias: f32, - render_layers: u32, -}; +// Prepare the data for display encoding. Converted to log domain. +fn applyAgXLog(Image: vec3) -> vec3 { + var prepared_image = max(vec3(0.0), Image); // clamp negatives + let r = dot(prepared_image, vec3(0.84247906, 0.0784336, 0.07922375)); + let g = dot(prepared_image, vec3(0.04232824, 0.87846864, 0.07916613)); + let b = dot(prepared_image, vec3(0.04237565, 0.0784336, 0.87914297)); + prepared_image = vec3(r, g, b); -``` + prepared_image = convertOpenDomainToNormalizedLog2_(prepared_image, -10.0, 6.5); -### crates/bevy_render/src/view/window/screenshot + prepared_image = clamp(prepared_image, vec3(0.0), vec3(1.0)); + return prepared_image; +} -```rust -// This vertex shader will create a triangle that will cover the entire screen -// with minimal effort, avoiding the need for a vertex buffer etc. -@vertex -fn vs_main(@builtin(vertex_index) in_vertex_index: u32) -> @builtin(position) vec4 { - let x = f32((in_vertex_index & 1u) << 2u); - let y = f32((in_vertex_index & 2u) << 1u); - return vec4(x - 1.0, y - 1.0, 0.0, 1.0); +fn applyLUT3D(Image: vec3, block_size: f32) -> vec3 { + return sample_current_lut(Image * ((block_size - 1.0) / block_size) + 0.5 / block_size).rgb; } -@group(0) @binding(0) var t: texture_2d; +// ------------------------- +// ------------------------- +// ------------------------- -@fragment -fn fs_main(@builtin(position) pos: vec4) -> @location(0) vec4 { - let coords = floor(pos.xy); - return textureLoad(t, vec2(coords), 0i); +fn sample_blender_filmic_lut(stimulus: vec3) -> vec3 { + let block_size = 64.0; + let normalized = saturate(convertOpenDomainToNormalizedLog2_(stimulus, -11.0, 12.0)); + return applyLUT3D(normalized, block_size); } -``` +// from https://64.github.io/tonemapping/ +// reinhard on RGB oversaturates colors +fn tonemapping_reinhard(color: vec3) -> vec3 { + return color / (1.0 + color); +} -### crates/bevy_pbr/src/render/pbr_functions +fn tonemapping_reinhard_extended(color: vec3, max_white: f32) -> vec3 { + let numerator = color * (1.0 + (color / vec3(max_white * max_white))); + return numerator / (1.0 + color); +} -```rust -#define_import_path bevy_pbr::pbr_functions +// luminance coefficients from Rec. 709. 
+// https://en.wikipedia.org/wiki/Rec._709 +fn tonemapping_luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} -#import bevy_pbr::{ - pbr_types, - pbr_bindings, - mesh_view_bindings as view_bindings, - mesh_view_types, - lighting, - transmission, - clustered_forward as clustering, - shadows, - ambient, - irradiance_volume, - mesh_types::{MESH_FLAGS_SHADOW_RECEIVER_BIT, MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT}, - utils::E, +fn tonemapping_change_luminance(c_in: vec3, l_out: f32) -> vec3 { + let l_in = tonemapping_luminance(c_in); + return c_in * (l_out / l_in); } -#ifdef ENVIRONMENT_MAP -#import bevy_pbr::environment_map -#endif +fn tonemapping_reinhard_luminance(color: vec3) -> vec3 { + let l_old = tonemapping_luminance(color); + let l_new = l_old / (1.0 + l_old); + return tonemapping_change_luminance(color, l_new); +} -#import bevy_core_pipeline::tonemapping::{screen_space_dither, powsafe, tone_mapping} +fn rgb_to_srgb_simple(color: vec3) -> vec3 { + return pow(color, vec3(1.0 / 2.2)); +} -fn alpha_discard(material: pbr_types::StandardMaterial, output_color: vec4) -> vec4 { - var color = output_color; - let alpha_mode = material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; - if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE { - // NOTE: If rendering as opaque, alpha should be ignored so set to 1.0 - color.a = 1.0; - } +// Source: Advanced VR Rendering, GDC 2015, Alex Vlachos, Valve, Slide 49 +// https://media.steampowered.com/apps/valve/2015/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf +fn screen_space_dither(frag_coord: vec2) -> vec3 { + var dither = vec3(dot(vec2(171.0, 231.0), frag_coord)).xxx; + dither = fract(dither.rgb / vec3(103.0, 71.0, 97.0)); + return (dither - 0.5) / 255.0; +} -#ifdef MAY_DISCARD - // NOTE: `MAY_DISCARD` is only defined in the alpha to coverage case if MSAA - // was off. This special situation causes alpha to coverage to fall back to - // alpha mask. - else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK || - alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE { - if color.a >= material.alpha_cutoff { - // NOTE: If rendering as masked alpha and >= the cutoff, render as fully opaque - color.a = 1.0; - } else { - // NOTE: output_color.a < in.material.alpha_cutoff should not be rendered - discard; - } +// Performs the "sectional" color grading: i.e. the color grading that applies +// individually to shadows, midtones, and highlights. +fn sectional_color_grading( + in: vec3, + color_grading: ptr, +) -> vec3 { + var color = in; + + // Determine whether the color is a shadow, midtone, or highlight. Colors + // close to the edges are considered a mix of both, to avoid sharp + // discontinuities. The formulas are taken from Blender's compositor. + + let level = (color.r + color.g + color.b) / 3.0; + + // Determine whether this color is a shadow, midtone, or highlight. If close + // to the cutoff points, blend between the two to avoid sharp color + // discontinuities. 
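+    // The three weights below (x for shadows, y for midtones, z for
+    // highlights) always sum to 1, so the per-section contrast, saturation,
+    // gamma, gain, and lift values dotted with `levels` further down blend
+    // smoothly across the two cutoff points instead of switching abruptly.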
+ var levels = vec3(0.0); + let midtone_range = (*color_grading).midtone_range; + if (level < midtone_range.x - LEVEL_MARGIN) { + levels.x = 1.0; + } else if (level < midtone_range.x + LEVEL_MARGIN) { + levels.y = ((level - midtone_range.x) * LEVEL_MARGIN_DIV) + 0.5; + levels.z = 1.0 - levels.y; + } else if (level < midtone_range.y - LEVEL_MARGIN) { + levels.y = 1.0; + } else if (level < midtone_range.y + LEVEL_MARGIN) { + levels.z = ((level - midtone_range.y) * LEVEL_MARGIN_DIV) + 0.5; + levels.y = 1.0 - levels.z; + } else { + levels.z = 1.0; } -#endif - return color; -} + // Calculate contrast/saturation/gamma/gain/lift. + let contrast = dot(levels, (*color_grading).contrast); + let saturation = dot(levels, (*color_grading).saturation); + let gamma = dot(levels, (*color_grading).gamma); + let gain = dot(levels, (*color_grading).gain); + let lift = dot(levels, (*color_grading).lift); -fn prepare_world_normal( - world_normal: vec3, - double_sided: bool, - is_front: bool, -) -> vec3 { - var output: vec3 = world_normal; -#ifndef VERTEX_TANGENTS -#ifndef STANDARD_MATERIAL_NORMAL_MAP - // NOTE: When NOT using normal-mapping, if looking at the back face of a double-sided - // material, the normal needs to be inverted. This is a branchless version of that. - output = (f32(!double_sided || is_front) * 2.0 - 1.0) * output; -#endif -#endif - return output; + // Adjust saturation and contrast. + let luma = tonemapping_luminance(color); + color = luma + saturation * (color - luma); + color = 0.5 + (color - 0.5) * contrast; + + // The [ASC CDL] formula for color correction. Given *i*, an input color, we + // have: + // + // out = (i × s + o)ⁿ + // + // Following the normal photographic naming convention, *gain* is the *s* + // factor, *lift* is the *o* term, and the inverse of *gamma* is the *n* + // exponent. + // + // [ASC CDL]: https://en.wikipedia.org/wiki/ASC_CDL#Combined_Function + color = powsafe(color * gain + lift, 1.0 / gamma); + + // Account for exposure. + color = color * powsafe(vec3(2.0), (*color_grading).exposure); + return max(color, vec3(0.0)); } -fn apply_normal_mapping( - standard_material_flags: u32, - world_normal: vec3, - double_sided: bool, - is_front: bool, -#ifdef VERTEX_TANGENTS -#ifdef STANDARD_MATERIAL_NORMAL_MAP - world_tangent: vec4, -#endif -#endif -#ifdef VERTEX_UVS - uv: vec2, -#endif - mip_bias: f32, -#ifdef MESHLET_MESH_MATERIAL_PASS - ddx_uv: vec2, - ddy_uv: vec2, +fn tone_mapping(in: vec4, in_color_grading: ColorGrading) -> vec4 { + var color = max(in.rgb, vec3(0.0)); + var color_grading = in_color_grading; // So we can take pointers to it. + + // Rotate hue if needed, by converting to and from HSV. Remember that hue is + // an angle, so it needs to be modulo 2π. +#ifdef HUE_ROTATE + var hsv = rgb_to_hsv(color); + hsv.r = (hsv.r + color_grading.hue) % PI_2; + color = hsv_to_rgb(hsv); #endif -) -> vec3 { - // NOTE: The mikktspace method of normal mapping explicitly requires that the world normal NOT - // be re-normalized in the fragment shader. This is primarily to match the way mikktspace - // bakes vertex tangents and normal maps so that this is the exact inverse. Blender, Unity, - // Unreal Engine, Godot, and more all use the mikktspace method. Do not change this code - // unless you really know what you are doing. 
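+    // Rough example of the wrap-around above: a stored hue of 5.9 rad plus a
+    // grading hue offset of 0.5 rad gives 6.4 rad, which wraps to
+    // 6.4 − 2π ≈ 0.12 rad, so the rotated hue always stays inside [0, 2π).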
- // http://www.mikktspace.com/ - var N: vec3 = world_normal; -#ifdef VERTEX_TANGENTS -#ifdef STANDARD_MATERIAL_NORMAL_MAP - // NOTE: The mikktspace method of normal mapping explicitly requires that these NOT be - // normalized nor any Gram-Schmidt applied to ensure the vertex normal is orthogonal to the - // vertex tangent! Do not change this code unless you really know what you are doing. - // http://www.mikktspace.com/ - var T: vec3 = world_tangent.xyz; - var B: vec3 = world_tangent.w * cross(N, T); -#endif + // Perform white balance correction. Conveniently, this is a linear + // transform. The matrix was pre-calculated from the temperature and tint + // values on the CPU. +#ifdef WHITE_BALANCE + color = max(color_grading.balance * color, vec3(0.0)); #endif -#ifdef VERTEX_TANGENTS -#ifdef VERTEX_UVS -#ifdef STANDARD_MATERIAL_NORMAL_MAP - // Nt is the tangent-space normal. -#ifdef MESHLET_MESH_MATERIAL_PASS - var Nt = textureSampleGrad(pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, uv, ddx_uv, ddy_uv).rgb; + // Perform the "sectional" color grading: i.e. the color grading that + // applies individually to shadows, midtones, and highlights. +#ifdef SECTIONAL_COLOR_GRADING + color = sectional_color_grading(color, &color_grading); #else - var Nt = textureSampleBias(pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, uv, mip_bias).rgb; + // If we're not doing sectional color grading, the exposure might still need + // to be applied, for example when using auto exposure. + color = color * powsafe(vec3(2.0), color_grading.exposure); #endif - if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP) != 0u { - // Only use the xy components and derive z for 2-component normal maps. - Nt = vec3(Nt.rg * 2.0 - 1.0, 0.0); - Nt.z = sqrt(1.0 - Nt.x * Nt.x - Nt.y * Nt.y); - } else { - Nt = Nt * 2.0 - 1.0; - } - // Normal maps authored for DirectX require flipping the y component - if (standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y) != 0u { - Nt.y = -Nt.y; - } - if double_sided && !is_front { - Nt = -Nt; - } - - // NOTE: The mikktspace method of normal mapping applies maps the tangent-space normal from - // the normal map texture in this way to be an EXACT inverse of how the normal map baker - // calculates the normal maps so there is no error introduced. Do not change this code - // unless you really know what you are doing. - // http://www.mikktspace.com/ - N = Nt.x * T + Nt.y * B + Nt.z * N; -#endif -#endif + // tone_mapping +#ifdef TONEMAP_METHOD_NONE + color = color; +#else ifdef TONEMAP_METHOD_REINHARD + color = tonemapping_reinhard(color.rgb); +#else ifdef TONEMAP_METHOD_REINHARD_LUMINANCE + color = tonemapping_reinhard_luminance(color.rgb); +#else ifdef TONEMAP_METHOD_ACES_FITTED + color = ACESFitted(color.rgb); +#else ifdef TONEMAP_METHOD_AGX + color = applyAgXLog(color); + color = applyLUT3D(color, 32.0); +#else ifdef TONEMAP_METHOD_SOMEWHAT_BORING_DISPLAY_TRANSFORM + color = somewhat_boring_display_transform(color.rgb); +#else ifdef TONEMAP_METHOD_TONY_MC_MAPFACE + color = sample_tony_mc_mapface_lut(color); +#else ifdef TONEMAP_METHOD_BLENDER_FILMIC + color = sample_blender_filmic_lut(color.rgb); #endif - return normalize(N); + // Perceptual post tonemapping grading + color = saturation(color, color_grading.post_saturation); + + return vec4(color, in.a); } -// NOTE: Correctly calculates the view vector depending on whether -// the projection is orthographic or perspective. 
-fn calculate_view( - world_position: vec4, - is_orthographic: bool, -) -> vec3 { - var V: vec3; - if is_orthographic { - // Orthographic view vector - V = normalize(vec3(view_bindings::view.view_proj[0].z, view_bindings::view.view_proj[1].z, view_bindings::view.view_proj[2].z)); - } else { - // Only valid for a perspective projection - V = normalize(view_bindings::view.world_position.xyz - world_position.xyz); - } - return V; +// This is an **incredibly crude** approximation of the inverse of the tone mapping function. +// We assume here that there's a simple linear relationship between the input and output +// which is not true at all, but useful to at least preserve the overall luminance of colors +// when sampling from an already tonemapped image. (e.g. for transmissive materials when HDR is off) +fn approximate_inverse_tone_mapping(in: vec4, color_grading: ColorGrading) -> vec4 { + let out = tone_mapping(in, color_grading); + let approximate_ratio = length(in.rgb) / length(out.rgb); + return vec4(in.rgb * approximate_ratio, in.a); } -#ifndef PREPASS_FRAGMENT -fn apply_pbr_lighting( - in: pbr_types::PbrInput, -) -> vec4 { - var output_color: vec4 = in.material.base_color; +``` - // TODO use .a for exposure compensation in HDR - let emissive = in.material.emissive; +### bevy_shaders/line_material - // calculate non-linear roughness from linear perceptualRoughness - let metallic = in.material.metallic; - let perceptual_roughness = in.material.perceptual_roughness; - let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); - let ior = in.material.ior; - let thickness = in.material.thickness; - let diffuse_transmission = in.material.diffuse_transmission; - let specular_transmission = in.material.specular_transmission; +```rust +#import bevy_pbr::forward_io::VertexOutput + +struct LineMaterial { + color: vec4, +}; + +@group(2) @binding(0) var material: LineMaterial; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return material.color; +} + +``` + +### bevy_shaders/animate_shader + +```rust +// The time since startup data is in the globals binding which is part of the mesh_view_bindings import +#import bevy_pbr::{ + mesh_view_bindings::globals, + forward_io::VertexOutput, +} - let specular_transmissive_color = specular_transmission * in.material.base_color.rgb; +fn oklab_to_linear_srgb(c: vec3) -> vec3 { + let L = c.x; + let a = c.y; + let b = c.z; - let diffuse_occlusion = in.diffuse_occlusion; - let specular_occlusion = in.specular_occlusion; + let l_ = L + 0.3963377774 * a + 0.2158037573 * b; + let m_ = L - 0.1055613458 * a - 0.0638541728 * b; + let s_ = L - 0.0894841775 * a - 1.2914855480 * b; - // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" - let NdotV = max(dot(in.N, in.V), 0.0001); + let l = l_ * l_ * l_; + let m = m_ * m_ * m_; + let s = s_ * s_ * s_; - // Remapping [0,1] reflectance to F0 - // See https://google.github.io/filament/Filament.html#materialsystem/parameterization/remapping - let reflectance = in.material.reflectance; - let F0 = 0.16 * reflectance * reflectance * (1.0 - metallic) + output_color.rgb * metallic; + return vec3( + 4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, + -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, + -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s, + ); +} - // Diffuse strength is inversely related to metallicity, specular and diffuse transmission - let diffuse_color = output_color.rgb * (1.0 - metallic) * (1.0 - 
specular_transmission) * (1.0 - diffuse_transmission); +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let speed = 2.0; + // The globals binding contains various global values like time + // which is the time since startup in seconds + let t_1 = sin(globals.time * speed) * 0.5 + 0.5; + let t_2 = cos(globals.time * speed); - // Diffuse transmissive strength is inversely related to metallicity and specular transmission, but directly related to diffuse transmission - let diffuse_transmissive_color = output_color.rgb * (1.0 - metallic) * (1.0 - specular_transmission) * diffuse_transmission; + let distance_to_center = distance(in.uv, vec2(0.5)) * 1.4; - // Calculate the world position of the second Lambertian lobe used for diffuse transmission, by subtracting material thickness - let diffuse_transmissive_lobe_world_position = in.world_position - vec4(in.world_normal, 0.0) * thickness; + // blending is done in a perceptual color space: https://bottosson.github.io/posts/oklab/ + let red = vec3(0.627955, 0.224863, 0.125846); + let green = vec3(0.86644, -0.233887, 0.179498); + let blue = vec3(0.701674, 0.274566, -0.169156); + let white = vec3(1.0, 0.0, 0.0); + let mixed = mix(mix(red, blue, t_1), mix(green, white, t_2), distance_to_center); - let R = reflect(-in.V, in.N); + return vec4(oklab_to_linear_srgb(mixed), 1.0); +} - let f_ab = lighting::F_AB(perceptual_roughness, NdotV); +``` - var direct_light: vec3 = vec3(0.0); +### bevy_shaders/copy_deferred_lighting_id - // Transmitted Light (Specular and Diffuse) - var transmitted_light: vec3 = vec3(0.0); +```rust +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput - let view_z = dot(vec4( - view_bindings::view.inverse_view[0].z, - view_bindings::view.inverse_view[1].z, - view_bindings::view.inverse_view[2].z, - view_bindings::view.inverse_view[3].z - ), in.world_position); - let cluster_index = clustering::fragment_cluster_index(in.frag_coord.xy, view_z, in.is_orthographic); - let offset_and_counts = clustering::unpack_offset_and_counts(cluster_index); +@group(0) @binding(0) +var material_id_texture: texture_2d; - // Point lights (direct) - for (var i: u32 = offset_and_counts[0]; i < offset_and_counts[0] + offset_and_counts[1]; i = i + 1u) { - let light_id = clustering::get_light_id(i); - var shadow: f32 = 1.0; - if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (view_bindings::point_lights.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = shadows::fetch_point_shadow(light_id, in.world_position, in.world_normal); - } - let light_contrib = lighting::point_light(in.world_position.xyz, light_id, roughness, NdotV, in.N, in.V, R, F0, f_ab, diffuse_color); - direct_light += light_contrib * shadow; +struct FragmentOutput { + @builtin(frag_depth) frag_depth: f32, -#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION - // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated - // world position, inverted normal and view vectors, and the following simplified - // values for a fully diffuse transmitted light contribution approximation: - // - // roughness = 1.0; - // NdotV = 1.0; - // R = vec3(0.0) // doesn't really matter - // f_ab = vec2(0.1) - // F0 = vec3(0.0) - var transmitted_shadow: f32 = 1.0; - if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) - && (view_bindings::point_lights.data[light_id].flags & 
mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - transmitted_shadow = shadows::fetch_point_shadow(light_id, diffuse_transmissive_lobe_world_position, -in.world_normal); - } - let transmitted_light_contrib = lighting::point_light(diffuse_transmissive_lobe_world_position.xyz, light_id, 1.0, 1.0, -in.N, -in.V, vec3(0.0), vec3(0.0), vec2(0.1), diffuse_transmissive_color); - transmitted_light += transmitted_light_contrib * transmitted_shadow; -#endif - } +} - // Spot lights (direct) - for (var i: u32 = offset_and_counts[0] + offset_and_counts[1]; i < offset_and_counts[0] + offset_and_counts[1] + offset_and_counts[2]; i = i + 1u) { - let light_id = clustering::get_light_id(i); +@fragment +fn fragment(in: FullscreenVertexOutput) -> FragmentOutput { + var out: FragmentOutput; + // Depth is stored as unorm, so we are dividing the u8 by 255.0 here. + out.frag_depth = f32(textureLoad(material_id_texture, vec2(in.position.xy), 0).x) / 255.0; + return out; +} - var shadow: f32 = 1.0; - if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (view_bindings::point_lights.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = shadows::fetch_spot_shadow(light_id, in.world_position, in.world_normal); - } - let light_contrib = lighting::spot_light(in.world_position.xyz, light_id, roughness, NdotV, in.N, in.V, R, F0, f_ab, diffuse_color); - direct_light += light_contrib * shadow; -#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION - // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated - // world position, inverted normal and view vectors, and the following simplified - // values for a fully diffuse transmitted light contribution approximation: - // - // roughness = 1.0; - // NdotV = 1.0; - // R = vec3(0.0) // doesn't really matter - // f_ab = vec2(0.1) - // F0 = vec3(0.0) - var transmitted_shadow: f32 = 1.0; - if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) - && (view_bindings::point_lights.data[light_id].flags & mesh_view_types::POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - transmitted_shadow = shadows::fetch_spot_shadow(light_id, diffuse_transmissive_lobe_world_position, -in.world_normal); - } - let transmitted_light_contrib = lighting::spot_light(diffuse_transmissive_lobe_world_position.xyz, light_id, 1.0, 1.0, -in.N, -in.V, vec3(0.0), vec3(0.0), vec2(0.1), diffuse_transmissive_color); - transmitted_light += transmitted_light_contrib * transmitted_shadow; -#endif - } +``` - // directional lights (direct) - let n_directional_lights = view_bindings::lights.n_directional_lights; - for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { - // check the directional light render layers intersect the view render layers - // note this is not necessary for point and spot lights, as the relevant lights are filtered in `assign_lights_to_clusters` - let light = &view_bindings::lights.directional_lights[i]; - if ((*light).render_layers & view_bindings::view.render_layers) == 0u { - continue; - } +### bevy_shaders/bloom - var shadow: f32 = 1.0; - if ((in.flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u - && (view_bindings::lights.directional_lights[i].flags & mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - shadow = shadows::fetch_directional_shadow(i, in.world_position, in.world_normal, view_z); - } - var light_contrib = lighting::directional_light(i, roughness, NdotV, 
in.N, in.V, R, F0, f_ab, diffuse_color); -#ifdef DIRECTIONAL_LIGHT_SHADOW_MAP_DEBUG_CASCADES - light_contrib = shadows::cascade_debug_visualization(light_contrib, i, view_z); -#endif - direct_light += light_contrib * shadow; +```rust +// Bloom works by creating an intermediate texture with a bunch of mip levels, each half the size of the previous. +// You then downsample each mip (starting with the original texture) to the lower resolution mip under it, going in order. +// You then upsample each mip (starting from the smallest mip) and blend with the higher resolution mip above it (ending on the original texture). +// +// References: +// * [COD] - Next Generation Post Processing in Call of Duty - http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare +// * [PBB] - Physically Based Bloom - https://learnopengl.com/Guest-Articles/2022/Phys.-Based-Bloom -#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION - // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated - // world position, inverted normal and view vectors, and the following simplified - // values for a fully diffuse transmitted light contribution approximation: - // - // roughness = 1.0; - // NdotV = 1.0; - // R = vec3(0.0) // doesn't really matter - // f_ab = vec2(0.1) - // F0 = vec3(0.0) - var transmitted_shadow: f32 = 1.0; - if ((in.flags & (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT)) == (MESH_FLAGS_SHADOW_RECEIVER_BIT | MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT) - && (view_bindings::lights.directional_lights[i].flags & mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u) { - transmitted_shadow = shadows::fetch_directional_shadow(i, diffuse_transmissive_lobe_world_position, -in.world_normal, view_z); - } - let transmitted_light_contrib = lighting::directional_light(i, 1.0, 1.0, -in.N, -in.V, vec3(0.0), vec3(0.0), vec2(0.1), diffuse_transmissive_color); - transmitted_light += transmitted_light_contrib * transmitted_shadow; -#endif - } +struct BloomUniforms { + threshold_precomputations: vec4, + viewport: vec4, + aspect: f32, +}; - var indirect_light = vec3(0.0f); +@group(0) @binding(0) var input_texture: texture_2d; +@group(0) @binding(1) var s: sampler; -#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION - // NOTE: We use the diffuse transmissive color, the second Lambertian lobe's calculated - // world position, inverted normal and view vectors, and the following simplified - // values for a fully diffuse transmitted light contribution approximation: - // - // perceptual_roughness = 1.0; - // NdotV = 1.0; - // F0 = vec3(0.0) - // diffuse_occlusion = vec3(1.0) - transmitted_light += ambient::ambient_light(diffuse_transmissive_lobe_world_position, -in.N, -in.V, 1.0, diffuse_transmissive_color, vec3(0.0), 1.0, vec3(1.0)); +@group(0) @binding(2) var uniforms: BloomUniforms; + +#ifdef FIRST_DOWNSAMPLE +// https://catlikecoding.com/unity/tutorials/advanced-rendering/bloom/#3.4 +fn soft_threshold(color: vec3) -> vec3 { + let brightness = max(color.r, max(color.g, color.b)); + var softness = brightness - uniforms.threshold_precomputations.y; + softness = clamp(softness, 0.0, uniforms.threshold_precomputations.z); + softness = softness * softness * uniforms.threshold_precomputations.w; + var contribution = max(brightness - uniforms.threshold_precomputations.x, softness); + contribution /= max(brightness, 0.00001); // Prevent division by 0 + return color * contribution; +} #endif - // Diffuse indirect lighting can come from a variety of 
sources. The - // priority goes like this: - // - // 1. Lightmap (highest) - // 2. Irradiance volume - // 3. Environment map (lowest) - // - // When we find a source of diffuse indirect lighting, we stop accumulating - // any more diffuse indirect light. This avoids double-counting if, for - // example, both lightmaps and irradiance volumes are present. +// luminance coefficients from Rec. 709. +// https://en.wikipedia.org/wiki/Rec._709 +fn tonemapping_luminance(v: vec3) -> f32 { + return dot(v, vec3(0.2126, 0.7152, 0.0722)); +} -#ifdef LIGHTMAP - if (all(indirect_light == vec3(0.0f))) { - indirect_light += in.lightmap_light * diffuse_color; - } -#endif +fn rgb_to_srgb_simple(color: vec3) -> vec3 { + return pow(color, vec3(1.0 / 2.2)); +} -#ifdef IRRADIANCE_VOLUME { - // Irradiance volume light (indirect) - if (all(indirect_light == vec3(0.0f))) { - let irradiance_volume_light = irradiance_volume::irradiance_volume_light( - in.world_position.xyz, in.N); - indirect_light += irradiance_volume_light * diffuse_color * diffuse_occlusion; - } -#endif +// http://graphicrants.blogspot.com/2013/12/tone-mapping.html +fn karis_average(color: vec3) -> f32 { + // Luminance calculated by gamma-correcting linear RGB to non-linear sRGB using pow(color, 1.0 / 2.2) + // and then calculating luminance based on Rec. 709 color primaries. + let luma = tonemapping_luminance(rgb_to_srgb_simple(color)) / 4.0; + return 1.0 / (1.0 + luma); +} + +// [COD] slide 153 +fn sample_input_13_tap(uv: vec2) -> vec3 { + let a = textureSample(input_texture, s, uv, vec2(-2, 2)).rgb; + let b = textureSample(input_texture, s, uv, vec2(0, 2)).rgb; + let c = textureSample(input_texture, s, uv, vec2(2, 2)).rgb; + let d = textureSample(input_texture, s, uv, vec2(-2, 0)).rgb; + let e = textureSample(input_texture, s, uv).rgb; + let f = textureSample(input_texture, s, uv, vec2(2, 0)).rgb; + let g = textureSample(input_texture, s, uv, vec2(-2, -2)).rgb; + let h = textureSample(input_texture, s, uv, vec2(0, -2)).rgb; + let i = textureSample(input_texture, s, uv, vec2(2, -2)).rgb; + let j = textureSample(input_texture, s, uv, vec2(-1, 1)).rgb; + let k = textureSample(input_texture, s, uv, vec2(1, 1)).rgb; + let l = textureSample(input_texture, s, uv, vec2(-1, -1)).rgb; + let m = textureSample(input_texture, s, uv, vec2(1, -1)).rgb; - // Environment map light (indirect) +#ifdef FIRST_DOWNSAMPLE + // [COD] slide 168 // - // Note that up until this point, we have only accumulated diffuse light. - // This call is the first call that can accumulate specular light. -#ifdef ENVIRONMENT_MAP - let environment_light = environment_map::environment_map_light( - perceptual_roughness, - roughness, - diffuse_color, - NdotV, - f_ab, - in.N, - R, - F0, - in.world_position.xyz, - any(indirect_light != vec3(0.0f))); - - indirect_light += environment_light.diffuse * diffuse_occlusion + - environment_light.specular * specular_occlusion; + // The first downsample pass reads from the rendered frame which may exhibit + // 'fireflies' (individual very bright pixels) that should not cause the bloom effect. + // + // The first downsample uses a firefly-reduction method proposed by Brian Karis + // which takes a weighted-average of the samples to limit their luma range to [0, 1]. + // This implementation matches the LearnOpenGL article [PBB]. 
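+    // Rough numeric check of the weighting below: each overlapping 4-tap box
+    // average is scaled by karis_average() = 1 / (1 + luma / 4), where luma is
+    // computed on the gamma-encoded color. A group whose average luma is ~40
+    // because of a single firefly keeps only ~1/11 of its weight, while a
+    // typical group with luma ~0.5 keeps ~0.89, which is what stops fireflies
+    // from blowing out the first bloom mip.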
+ var group0 = (a + b + d + e) * (0.125f / 4.0f); + var group1 = (b + c + e + f) * (0.125f / 4.0f); + var group2 = (d + e + g + h) * (0.125f / 4.0f); + var group3 = (e + f + h + i) * (0.125f / 4.0f); + var group4 = (j + k + l + m) * (0.5f / 4.0f); + group0 *= karis_average(group0); + group1 *= karis_average(group1); + group2 *= karis_average(group2); + group3 *= karis_average(group3); + group4 *= karis_average(group4); + return group0 + group1 + group2 + group3 + group4; +#else + var sample = (a + c + g + i) * 0.03125; + sample += (b + d + f + h) * 0.0625; + sample += (e + j + k + l + m) * 0.125; + return sample; +#endif +} - // we'll use the specular component of the transmitted environment - // light in the call to `specular_transmissive_light()` below - var specular_transmitted_environment_light = vec3(0.0); +// [COD] slide 162 +fn sample_input_3x3_tent(uv: vec2) -> vec3 { + // Radius. Empirically chosen by and tweaked from the LearnOpenGL article. + let x = 0.004 / uniforms.aspect; + let y = 0.004; -#ifdef STANDARD_MATERIAL_SPECULAR_OR_DIFFUSE_TRANSMISSION - // NOTE: We use the diffuse transmissive color, inverted normal and view vectors, - // and the following simplified values for the transmitted environment light contribution - // approximation: - // - // diffuse_color = vec3(1.0) // later we use `diffuse_transmissive_color` and `specular_transmissive_color` - // NdotV = 1.0; - // R = T // see definition below - // F0 = vec3(1.0) - // diffuse_occlusion = 1.0 - // - // (This one is slightly different from the other light types above, because the environment - // map light returns both diffuse and specular components separately, and we want to use both) + let a = textureSample(input_texture, s, vec2(uv.x - x, uv.y + y)).rgb; + let b = textureSample(input_texture, s, vec2(uv.x, uv.y + y)).rgb; + let c = textureSample(input_texture, s, vec2(uv.x + x, uv.y + y)).rgb; - let T = -normalize( - in.V + // start with view vector at entry point - refract(in.V, -in.N, 1.0 / ior) * thickness // add refracted vector scaled by thickness, towards exit point - ); // normalize to find exit point view vector + let d = textureSample(input_texture, s, vec2(uv.x - x, uv.y)).rgb; + let e = textureSample(input_texture, s, vec2(uv.x, uv.y)).rgb; + let f = textureSample(input_texture, s, vec2(uv.x + x, uv.y)).rgb; - let transmitted_environment_light = bevy_pbr::environment_map::environment_map_light( - perceptual_roughness, - roughness, - vec3(1.0), - 1.0, - f_ab, - -in.N, - T, - vec3(1.0), - in.world_position.xyz, - false); -#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION - transmitted_light += transmitted_environment_light.diffuse * diffuse_transmissive_color; -#endif -#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION - specular_transmitted_environment_light = transmitted_environment_light.specular * specular_transmissive_color; -#endif -#endif // STANDARD_MATERIAL_SPECULAR_OR_DIFFUSE_TRANSMISSION -#else - // If there's no environment map light, there's no transmitted environment - // light specular component, so we can just hardcode it to zero. 
- let specular_transmitted_environment_light = vec3(0.0); -#endif + let g = textureSample(input_texture, s, vec2(uv.x - x, uv.y - y)).rgb; + let h = textureSample(input_texture, s, vec2(uv.x, uv.y - y)).rgb; + let i = textureSample(input_texture, s, vec2(uv.x + x, uv.y - y)).rgb; - // Ambient light (indirect) - indirect_light += ambient::ambient_light(in.world_position, in.N, in.V, NdotV, diffuse_color, F0, perceptual_roughness, diffuse_occlusion); + var sample = e * 0.25; + sample += (b + d + f + h) * 0.125; + sample += (a + c + g + i) * 0.0625; - let emissive_light = emissive.rgb * output_color.a; + return sample; +} -#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION - transmitted_light += transmission::specular_transmissive_light(in.world_position, in.frag_coord.xyz, view_z, in.N, in.V, F0, ior, thickness, perceptual_roughness, specular_transmissive_color, specular_transmitted_environment_light).rgb; +#ifdef FIRST_DOWNSAMPLE +@fragment +fn downsample_first(@location(0) output_uv: vec2) -> @location(0) vec4 { + let sample_uv = uniforms.viewport.xy + output_uv * uniforms.viewport.zw; + var sample = sample_input_13_tap(sample_uv); + // Lower bound of 0.0001 is to avoid propagating multiplying by 0.0 through the + // downscaling and upscaling which would result in black boxes. + // The upper bound is to prevent NaNs. + // with f32::MAX (E+38) Chrome fails with ":value 340282346999999984391321947108527833088.0 cannot be represented as 'f32'" + sample = clamp(sample, vec3(0.0001), vec3(3.40282347E+37)); - if (in.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ATTENUATION_ENABLED_BIT) != 0u { - // We reuse the `atmospheric_fog()` function here, as it's fundamentally - // equivalent to the attenuation that takes place inside the material volume, - // and will allow us to eventually hook up subsurface scattering more easily - var attenuation_fog: mesh_view_types::Fog; - attenuation_fog.base_color.a = 1.0; - attenuation_fog.be = pow(1.0 - in.material.attenuation_color.rgb, vec3(E)) / in.material.attenuation_distance; - // TODO: Add the subsurface scattering factor below - // attenuation_fog.bi = /* ... */ - transmitted_light = bevy_pbr::fog::atmospheric_fog( - attenuation_fog, vec4(transmitted_light, 1.0), thickness, - vec3(0.0) // TODO: Pass in (pre-attenuated) scattered light contribution here - ).rgb; - } +#ifdef USE_THRESHOLD + sample = soft_threshold(sample); #endif - // Total light - output_color = vec4( - view_bindings::view.exposure * (transmitted_light + direct_light + indirect_light + emissive_light), - output_color.a - ); + return vec4(sample, 1.0); +} +#endif - output_color = clustering::cluster_debug_visualization( - output_color, - view_z, - in.is_orthographic, - offset_and_counts, - cluster_index, - ); +@fragment +fn downsample(@location(0) uv: vec2) -> @location(0) vec4 { + return vec4(sample_input_13_tap(uv), 1.0); +} - return output_color; +@fragment +fn upsample(@location(0) uv: vec2) -> @location(0) vec4 { + return vec4(sample_input_3x3_tent(uv), 1.0); } -#endif // PREPASS_FRAGMENT -fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_world_position: vec3, view_world_position: vec3) -> vec4 { - let view_to_world = fragment_world_position.xyz - view_world_position.xyz; +``` - // `length()` is used here instead of just `view_to_world.z` since that produces more - // high quality results, especially for denser/smaller fogs. 
we get a "curved" - // fog shape that remains consistent with camera rotation, instead of a "linear" - // fog shape that looks a bit fake - let distance = length(view_to_world); +### bevy_shaders/parallax_mapping - var scattering = vec3(0.0); - if fog_params.directional_light_color.a > 0.0 { - let view_to_world_normalized = view_to_world / distance; - let n_directional_lights = view_bindings::lights.n_directional_lights; - for (var i: u32 = 0u; i < n_directional_lights; i = i + 1u) { - let light = view_bindings::lights.directional_lights[i]; - scattering += pow( - max( - dot(view_to_world_normalized, light.direction_to_light), - 0.0 - ), - fog_params.directional_light_exponent - ) * light.color.rgb * view_bindings::view.exposure; - } - } +```rust +#define_import_path bevy_pbr::parallax_mapping - if fog_params.mode == mesh_view_types::FOG_MODE_LINEAR { - return bevy_pbr::fog::linear_fog(fog_params, input_color, distance, scattering); - } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL { - return bevy_pbr::fog::exponential_fog(fog_params, input_color, distance, scattering); - } else if fog_params.mode == mesh_view_types::FOG_MODE_EXPONENTIAL_SQUARED { - return bevy_pbr::fog::exponential_squared_fog(fog_params, input_color, distance, scattering); - } else if fog_params.mode == mesh_view_types::FOG_MODE_ATMOSPHERIC { - return bevy_pbr::fog::atmospheric_fog(fog_params, input_color, distance, scattering); - } else { - return input_color; - } +#import bevy_pbr::pbr_bindings::{depth_map_texture, depth_map_sampler} + +fn sample_depth_map(uv: vec2) -> f32 { + // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 + // backend (Fxc) panics when using "gradient instructions" inside a loop. + // It results in the whole loop being unrolled by the shader compiler, + // which it can't do because the upper limit of the loop in steep parallax + // mapping is a variable set by the user. + // The "gradient instructions" comes from `textureSample` computing MIP level + // based on UV derivative. With `textureSampleLevel`, we provide ourselves + // the MIP level, so no gradient instructions are used, and we can use + // sample_depth_map in our loop. + // See https://stackoverflow.com/questions/56581141/direct3d11-gradient-instruction-used-in-a-loop-with-varying-iteration-forcing + return textureSampleLevel(depth_map_texture, depth_map_sampler, uv, 0.0).r; } -#ifdef PREMULTIPLY_ALPHA -fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4 { -// `Blend`, `Premultiplied` and `Alpha` all share the same `BlendState`. Depending -// on the alpha mode, we premultiply the color channels by the alpha channel value, -// (and also optionally replace the alpha value with 0.0) so that the result produces -// the desired blend mode when sent to the blending operation. 
-#ifdef BLEND_PREMULTIPLIED_ALPHA - // For `BlendState::PREMULTIPLIED_ALPHA_BLENDING` the blend function is: - // - // result = 1 * src_color + (1 - src_alpha) * dst_color - let alpha_mode = standard_material_flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; - if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD { - // Here, we premultiply `src_color` by `src_alpha`, and replace `src_alpha` with 0.0: - // - // src_color *= src_alpha - // src_alpha = 0.0 - // - // We end up with: - // - // result = 1 * (src_alpha * src_color) + (1 - 0) * dst_color - // result = src_alpha * src_color + 1 * dst_color - // - // Which is the blend operation for additive blending - return vec4(color.rgb * color.a, 0.0); - } else { - // Here, we don't do anything, so that we get premultiplied alpha blending. (As expected) - return color.rgba; +// An implementation of parallax mapping, see https://en.wikipedia.org/wiki/Parallax_mapping +// Code derived from: https://web.archive.org/web/20150419215321/http://sunandblackcat.com/tipFullView.php?l=eng&topicid=28 +fn parallaxed_uv( + depth_scale: f32, + max_layer_count: f32, + max_steps: u32, + // The original interpolated uv + original_uv: vec2, + // The vector from the camera to the fragment at the surface in tangent space + Vt: vec3, +) -> vec2 { + if max_layer_count < 1.0 { + return original_uv; } -#endif -// `Multiply` uses its own `BlendState`, but we still need to premultiply here in the -// shader so that we get correct results as we tweak the alpha channel -#ifdef BLEND_MULTIPLY - // The blend function is: - // - // result = dst_color * src_color + (1 - src_alpha) * dst_color - // - // We premultiply `src_color` by `src_alpha`: - // - // src_color *= src_alpha - // - // We end up with: - // - // result = dst_color * (src_color * src_alpha) + (1 - src_alpha) * dst_color - // result = src_alpha * (src_color * dst_color) + (1 - src_alpha) * dst_color + var uv = original_uv; + + // Steep Parallax Mapping + // ====================== + // Split the depth map into `layer_count` layers. + // When Vt hits the surface of the mesh (excluding depth displacement), + // if the depth is not below or on surface including depth displacement (textureSample), then + // look forward (+= delta_uv) on depth texture according to + // Vt and distance between hit surface and depth map surface, + // repeat until below the surface. // - // Which is the blend operation for multiplicative blending with arbitrary mixing - // controlled by the source alpha channel - return vec4(color.rgb * color.a, color.a); -#endif -} -#endif + // Where `layer_count` is interpolated between `1.0` and + // `max_layer_count` according to the steepness of Vt. + + let view_steepness = abs(Vt.z); + // We mix with minimum value 1.0 because otherwise, + // with 0.0, we get a division by zero in surfaces parallel to viewport, + // resulting in a singularity. 
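    // Worked example with illustrative values (not from the original source): with
    // max_layer_count = 16.0 and view_steepness = 0.25 (a fairly grazing view),
    // layer_count = mix(16.0, 1.0, 0.25) = 12.25 and layer_depth ~= 0.082, so grazing
    // angles march through more, thinner layers, while a head-on view (steepness = 1.0)
    // collapses to a single full-depth layer and the march exits after at most one step.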
+ let layer_count = mix(max_layer_count, 1.0, view_steepness); + let layer_depth = 1.0 / layer_count; + var delta_uv = depth_scale * layer_depth * Vt.xy * vec2(1.0, -1.0) / view_steepness; -// fog, alpha premultiply -// for non-hdr cameras, tonemapping and debanding -fn main_pass_post_lighting_processing( - pbr_input: pbr_types::PbrInput, - input_color: vec4, -) -> vec4 { - var output_color = input_color; + var current_layer_depth = 0.0; + var texture_depth = sample_depth_map(uv); - // fog - if (view_bindings::fog.mode != mesh_view_types::FOG_MODE_OFF && (pbr_input.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { - output_color = apply_fog(view_bindings::fog, output_color, pbr_input.world_position.xyz, view_bindings::view.world_position.xyz); + // texture_depth > current_layer_depth means the depth map depth is deeper + // than the depth the ray would be at this UV offset so the ray has not + // intersected the surface + for (var i: i32 = 0; texture_depth > current_layer_depth && i <= i32(layer_count); i++) { + current_layer_depth += layer_depth; + uv += delta_uv; + texture_depth = sample_depth_map(uv); } -#ifdef TONEMAP_IN_SHADER - output_color = tone_mapping(output_color, view_bindings::view.color_grading); -#ifdef DEBAND_DITHER - var output_rgb = output_color.rgb; - output_rgb = powsafe(output_rgb, 1.0 / 2.2); - output_rgb += screen_space_dither(pbr_input.frag_coord.xy); - // This conversion back to linear space is required because our output texture format is - // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. - output_rgb = powsafe(output_rgb, 2.2); - output_color = vec4(output_rgb, output_color.a); -#endif -#endif -#ifdef PREMULTIPLY_ALPHA - output_color = premultiply_alpha(pbr_input.material.flags, output_color); +#ifdef RELIEF_MAPPING + // Relief Mapping + // ============== + // "Refine" the rough result from Steep Parallax Mapping + // with a **binary search** between the layer selected by steep parallax + // and the next one to find a point closer to the depth map surface. + // This reduces the jaggy step artifacts from steep parallax mapping. + + delta_uv *= 0.5; + var delta_depth = 0.5 * layer_depth; + + uv -= delta_uv; + current_layer_depth -= delta_depth; + + for (var i: u32 = 0u; i < max_steps; i++) { + texture_depth = sample_depth_map(uv); + + // Halve the deltas for the next step + delta_uv *= 0.5; + delta_depth *= 0.5; + + // Step based on whether the current depth is above or below the depth map + if (texture_depth > current_layer_depth) { + uv += delta_uv; + current_layer_depth += delta_depth; + } else { + uv -= delta_uv; + current_layer_depth -= delta_depth; + } + } +#else + // Parallax Occlusion mapping + // ========================== + // "Refine" Steep Parallax Mapping by interpolating between the + // previous layer's depth and the computed layer depth. + // Only requires a single lookup, unlike Relief Mapping, but + // may skip small details and result in writhing material artifacts. + let previous_uv = uv - delta_uv; + let next_depth = texture_depth - current_layer_depth; + let previous_depth = sample_depth_map(previous_uv) - current_layer_depth + layer_depth; + + let weight = next_depth / (next_depth - previous_depth); + + uv = mix(uv, previous_uv, weight); + + current_layer_depth += mix(next_depth, previous_depth, weight); #endif - return output_color; + + // Note: `current_layer_depth` is not returned, but may be useful + // for light computation later on in future improvements of the pbr shader. 
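    // Worked example for the parallax occlusion branch above (illustrative values):
    // if the march overshoots so that next_depth = -0.02 (the ray is now below the
    // height field) and previous_depth = 0.03 (the previous step was still above it),
    // then weight = -0.02 / (-0.02 - 0.03) = 0.4, and the returned uv is moved 40% of
    // the way back toward previous_uv, i.e. to the linear estimate of the intersection.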
+ return uv; } ``` -### crates/bevy_pbr/src/render/pbr +### bevy_shaders/extended_material ```rust #import bevy_pbr::{ - pbr_functions::alpha_discard, pbr_fragment::pbr_input_from_standard_material, + pbr_functions::alpha_discard, } #ifdef PREPASS_PIPELINE @@ -3145,50 +28491,47 @@ fn main_pass_post_lighting_processing( #import bevy_pbr::{ forward_io::{VertexOutput, FragmentOutput}, pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, - pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, } #endif -#ifdef MESHLET_MESH_MATERIAL_PASS -#import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output -#endif +struct MyExtendedMaterial { + quantize_steps: u32, +} + +@group(2) @binding(100) +var my_extended_material: MyExtendedMaterial; @fragment fn fragment( -#ifdef MESHLET_MESH_MATERIAL_PASS - @builtin(position) frag_coord: vec4, -#else in: VertexOutput, @builtin(front_facing) is_front: bool, -#endif ) -> FragmentOutput { -#ifdef MESHLET_MESH_MATERIAL_PASS - let in = resolve_vertex_output(frag_coord); - let is_front = true; -#endif - // generate a PbrInput struct from the StandardMaterial bindings var pbr_input = pbr_input_from_standard_material(in, is_front); + // we can optionally modify the input before lighting and alpha_discard is applied + pbr_input.material.base_color.b = pbr_input.material.base_color.r; + // alpha discard pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); #ifdef PREPASS_PIPELINE - // write the gbuffer, lighting pass id, and optionally normal and motion_vector textures + // in deferred mode we can't modify anything after that, as lighting is run in a separate fullscreen shader. let out = deferred_output(in, pbr_input); #else - // in forward mode, we calculate the lit color immediately, and then apply some post-lighting effects here. - // in deferred mode the lit color and these effects will be calculated in the deferred lighting shader var out: FragmentOutput; - if (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { - out.color = apply_pbr_lighting(pbr_input); - } else { - out.color = pbr_input.material.base_color; - } + // apply lighting + out.color = apply_pbr_lighting(pbr_input); + + // we can optionally modify the lit color before post-processing is applied + out.color = vec4(vec4(out.color * f32(my_extended_material.quantize_steps))) / f32(my_extended_material.quantize_steps); // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) // note this does not include fullscreen postprocessing effects like bloom. 
out.color = main_pass_post_lighting_processing(pbr_input, out.color); + + // we can optionally modify the final result here + out.color = out.color * 2.0; #endif return out; @@ -3196,319 +28539,301 @@ fn fragment( ``` -### crates/bevy_pbr/src/render/pbr_lighting +### bevy_shaders/prepass_bindings ```rust -#define_import_path bevy_pbr::lighting +#define_import_path bevy_pbr::prepass_bindings -#import bevy_pbr::{ - utils::PI, - mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, - mesh_view_bindings as view_bindings, +struct PreviousViewUniforms { + view_from_world: mat4x4, + clip_from_world: mat4x4, } -// From the Filament design doc -// https://google.github.io/filament/Filament.html#table_symbols -// Symbol Definition -// v View unit vector -// l Incident light unit vector -// n Surface normal unit vector -// h Half unit vector between l and v -// f BRDF -// f_d Diffuse component of a BRDF -// f_r Specular component of a BRDF -// α Roughness, remapped from using input perceptualRoughness -// σ Diffuse reflectance -// Ω Spherical domain -// f0 Reflectance at normal incidence -// f90 Reflectance at grazing angle -// χ+(a) Heaviside function (1 if a>0 and 0 otherwise) -// nior Index of refraction (IOR) of an interface -// ⟨n⋅l⟩ Dot product clamped to [0..1] -// ⟨a⟩ Saturated value (clamped to [0..1]) +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(2) var previous_view_uniforms: PreviousViewUniforms; +#endif // MOTION_VECTOR_PREPASS -// The Bidirectional Reflectance Distribution Function (BRDF) describes the surface response of a standard material -// and consists of two components, the diffuse component (f_d) and the specular component (f_r): -// f(v,l) = f_d(v,l) + f_r(v,l) -// -// The form of the microfacet model is the same for diffuse and specular -// f_r(v,l) = f_d(v,l) = 1 / { |n⋅v||n⋅l| } ∫_Ω D(m,α) G(v,l,m) f_m(v,l,m) (v⋅m) (l⋅m) dm -// -// In which: -// D, also called the Normal Distribution Function (NDF) models the distribution of the microfacets -// G models the visibility (or occlusion or shadow-masking) of the microfacets -// f_m is the microfacet BRDF and differs between specular and diffuse components -// -// The above integration needs to be approximated. 
+// Material bindings will be in @group(2) -// distanceAttenuation is simply the square falloff of light intensity -// combined with a smooth attenuation at the edge of the light radius -// -// light radius is a non-physical construct for efficiency purposes, -// because otherwise every light affects every fragment in the scene -fn getDistanceAttenuation(distanceSquare: f32, inverseRangeSquared: f32) -> f32 { - let factor = distanceSquare * inverseRangeSquared; - let smoothFactor = saturate(1.0 - factor * factor); - let attenuation = smoothFactor * smoothFactor; - return attenuation * 1.0 / max(distanceSquare, 0.0001); -} +``` -// Normal distribution function (specular D) -// Based on https://google.github.io/filament/Filament.html#citation-walter07 +### bevy_shaders/fog -// D_GGX(h,α) = α^2 / { π ((n⋅h)^2 (α2−1) + 1)^2 } +```rust +#define_import_path bevy_pbr::fog -// Simple implementation, has precision problems when using fp16 instead of fp32 -// see https://google.github.io/filament/Filament.html#listing_speculardfp16 -fn D_GGX(roughness: f32, NoH: f32, h: vec3) -> f32 { - let oneMinusNoHSquared = 1.0 - NoH * NoH; - let a = NoH * roughness; - let k = roughness / (oneMinusNoHSquared + a * a); - let d = k * k * (1.0 / PI); - return d; +#import bevy_pbr::{ + mesh_view_bindings::fog, + mesh_view_types::Fog, } -// Visibility function (Specular G) -// V(v,l,a) = G(v,l,α) / { 4 (n⋅v) (n⋅l) } -// such that f_r becomes -// f_r(v,l) = D(h,α) V(v,l,α) F(v,h,f0) -// where -// V(v,l,α) = 0.5 / { n⋅l sqrt((n⋅v)^2 (1−α2) + α2) + n⋅v sqrt((n⋅l)^2 (1−α2) + α2) } -// Note the two sqrt's, that may be slow on mobile, see https://google.github.io/filament/Filament.html#listing_approximatedspecularv -fn V_SmithGGXCorrelated(roughness: f32, NoV: f32, NoL: f32) -> f32 { - let a2 = roughness * roughness; - let lambdaV = NoL * sqrt((NoV - a2 * NoV) * NoV + a2); - let lambdaL = NoV * sqrt((NoL - a2 * NoL) * NoL + a2); - let v = 0.5 / (lambdaV + lambdaL); - return v; +// Fog formulas adapted from: +// https://learn.microsoft.com/en-us/windows/win32/direct3d9/fog-formulas +// https://catlikecoding.com/unity/tutorials/rendering/part-14/ +// https://iquilezles.org/articles/fog/ (Atmospheric Fog and Scattering) + +fn scattering_adjusted_fog_color( + fog_params: Fog, + scattering: vec3, +) -> vec4 { + if (fog_params.directional_light_color.a > 0.0) { + return vec4( + fog_params.base_color.rgb + + scattering * fog_params.directional_light_color.rgb * fog_params.directional_light_color.a, + fog_params.base_color.a, + ); + } else { + return fog_params.base_color; + } } -// Fresnel function -// see https://google.github.io/filament/Filament.html#citation-schlick94 -// F_Schlick(v,h,f_0,f_90) = f_0 + (f_90 − f_0) (1 − v⋅h)^5 -fn F_Schlick_vec(f0: vec3, f90: f32, VoH: f32) -> vec3 { - // not using mix to keep the vec3 and float versions identical - return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); +fn linear_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let start = fog_params.be.x; + let end = fog_params.be.y; + fog_color.a *= 1.0 - clamp((end - distance) / (end - start), 0.0, 1.0); + return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); } -fn F_Schlick(f0: f32, f90: f32, VoH: f32) -> f32 { - // not using mix to keep the vec3 and float versions identical - return f0 + (f90 - f0) * pow(1.0 - VoH, 5.0); +fn exponential_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + 
scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let density = fog_params.be.x; + fog_color.a *= 1.0 - 1.0 / exp(distance * density); + return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); +} + +fn exponential_squared_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let distance_times_density = distance * fog_params.be.x; + fog_color.a *= 1.0 - 1.0 / exp(distance_times_density * distance_times_density); + return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); } -fn fresnel(f0: vec3, LoH: f32) -> vec3 { - // f_90 suitable for ambient occlusion - // see https://google.github.io/filament/Filament.html#lighting/occlusion - let f90 = saturate(dot(f0, vec3(50.0 * 0.33))); - return F_Schlick_vec(f0, f90, LoH); +fn atmospheric_fog( + fog_params: Fog, + input_color: vec4, + distance: f32, + scattering: vec3, +) -> vec4 { + var fog_color = scattering_adjusted_fog_color(fog_params, scattering); + let extinction_factor = 1.0 - 1.0 / exp(distance * fog_params.be); + let inscattering_factor = 1.0 - 1.0 / exp(distance * fog_params.bi); + return vec4( + input_color.rgb * (1.0 - extinction_factor * fog_color.a) + + fog_color.rgb * inscattering_factor * fog_color.a, + input_color.a + ); } -// Specular BRDF -// https://google.github.io/filament/Filament.html#materialsystem/specularbrdf - -// Cook-Torrance approximation of the microfacet model integration using Fresnel law F to model f_m -// f_r(v,l) = { D(h,α) G(v,l,α) F(v,h,f0) } / { 4 (n⋅v) (n⋅l) } -fn specular( - f0: vec3, - roughness: f32, - h: vec3, - NoV: f32, - NoL: f32, - NoH: f32, - LoH: f32, - specularIntensity: f32, - f_ab: vec2 -) -> vec3 { - let D = D_GGX(roughness, NoH, h); - let V = V_SmithGGXCorrelated(roughness, NoV, NoL); - let F = fresnel(f0, LoH); - - var Fr = (specularIntensity * D * V) * F; +``` - // Multiscattering approximation: https://google.github.io/filament/Filament.html#listing_energycompensationimpl - Fr *= 1.0 + f0 * (1.0 / f_ab.x - 1.0); +### bevy_shaders/pbr_prepass - return Fr; +```rust +#import bevy_pbr::{ + pbr_prepass_functions, + pbr_bindings, + pbr_bindings::material, + pbr_types, + pbr_functions, + pbr_functions::SampleBias, + prepass_io, + mesh_view_bindings::view, } -// Diffuse BRDF -// https://google.github.io/filament/Filament.html#materialsystem/diffusebrdf -// fd(v,l) = σ/π * 1 / { |n⋅v||n⋅l| } ∫Ω D(m,α) G(v,l,m) (v⋅m) (l⋅m) dm -// -// simplest approximation -// float Fd_Lambert() { -// return 1.0 / PI; -// } -// -// vec3 Fd = diffuseColor * Fd_Lambert(); -// -// Disney approximation -// See https://google.github.io/filament/Filament.html#citation-burley12 -// minimal quality difference -fn Fd_Burley(roughness: f32, NoV: f32, NoL: f32, LoH: f32) -> f32 { - let f90 = 0.5 + 2.0 * roughness * LoH * LoH; - let lightScatter = F_Schlick(1.0, f90, NoL); - let viewScatter = F_Schlick(1.0, f90, NoV); - return lightScatter * viewScatter * (1.0 / PI); -} +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output +#endif -// Scale/bias approximation -// https://www.unrealengine.com/en-US/blog/physically-based-shading-on-mobile -// TODO: Use a LUT (more accurate) -fn F_AB(perceptual_roughness: f32, NoV: f32) -> vec2 { - let c0 = vec4(-1.0, -0.0275, -0.572, 0.022); - let c1 = vec4(1.0, 0.0425, 1.04, -0.04); - let r = perceptual_roughness * c0 + c1; - 
let a004 = min(r.x * r.x, exp2(-9.28 * NoV)) * r.x + r.y; - return vec2(-1.04, 1.04) * a004 + r.zw; -} +#ifdef PREPASS_FRAGMENT +@fragment +fn fragment( +#ifdef MESHLET_MESH_MATERIAL_PASS + @builtin(position) frag_coord: vec4, +#else + in: prepass_io::VertexOutput, + @builtin(front_facing) is_front: bool, +#endif +) -> prepass_io::FragmentOutput { +#ifdef MESHLET_MESH_MATERIAL_PASS + let in = resolve_vertex_output(frag_coord); + let is_front = true; +#else + pbr_prepass_functions::prepass_alpha_discard(in); +#endif -fn EnvBRDFApprox(f0: vec3, f_ab: vec2) -> vec3 { - return f0 * f_ab.x + f_ab.y; -} + var out: prepass_io::FragmentOutput; -fn perceptualRoughnessToRoughness(perceptualRoughness: f32) -> f32 { - // clamp perceptual roughness to prevent precision problems - // According to Filament design 0.089 is recommended for mobile - // Filament uses 0.045 for non-mobile - let clampedPerceptualRoughness = clamp(perceptualRoughness, 0.089, 1.0); - return clampedPerceptualRoughness * clampedPerceptualRoughness; -} +#ifdef DEPTH_CLAMP_ORTHO + out.frag_depth = in.clip_position_unclamped.z; +#endif // DEPTH_CLAMP_ORTHO -fn point_light( - world_position: vec3, - light_id: u32, - roughness: f32, - NdotV: f32, - N: vec3, - V: vec3, - R: vec3, - F0: vec3, - f_ab: vec2, - diffuseColor: vec3 -) -> vec3 { - let light = &view_bindings::point_lights.data[light_id]; - let light_to_frag = (*light).position_radius.xyz - world_position.xyz; - let distance_square = dot(light_to_frag, light_to_frag); - let rangeAttenuation = getDistanceAttenuation(distance_square, (*light).color_inverse_square_range.w); +#ifdef NORMAL_PREPASS + // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit + if (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { + let double_sided = (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; - // Specular. - // Representative Point Area Lights. - // see http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p14-16 - let a = roughness; - let centerToRay = dot(light_to_frag, R) * R - light_to_frag; - let closestPoint = light_to_frag + centerToRay * saturate((*light).position_radius.w * inverseSqrt(dot(centerToRay, centerToRay))); - let LspecLengthInverse = inverseSqrt(dot(closestPoint, closestPoint)); - let normalizationFactor = a / saturate(a + ((*light).position_radius.w * 0.5 * LspecLengthInverse)); - let specularIntensity = normalizationFactor * normalizationFactor; + let world_normal = pbr_functions::prepare_world_normal( + in.world_normal, + double_sided, + is_front, + ); - var L: vec3 = closestPoint * LspecLengthInverse; // normalize() equivalent? - var H: vec3 = normalize(L + V); - var NoL: f32 = saturate(dot(N, L)); - var NoH: f32 = saturate(dot(N, H)); - var LoH: f32 = saturate(dot(L, H)); + var normal = world_normal; - let specular_light = specular(F0, roughness, H, NdotV, NoL, NoH, LoH, specularIntensity, f_ab); +#ifdef VERTEX_UVS +#ifdef VERTEX_TANGENTS +#ifdef STANDARD_MATERIAL_NORMAL_MAP - // Diffuse. - // Comes after specular since its NoL is used in the lighting equation. 
- L = normalize(light_to_frag); - H = normalize(L + V); - NoL = saturate(dot(N, L)); - NoH = saturate(dot(N, H)); - LoH = saturate(dot(L, H)); +#ifdef STANDARD_MATERIAL_NORMAL_MAP_UV_B + let uv = (material.uv_transform * vec3(in.uv_b, 1.0)).xy; +#else + let uv = (material.uv_transform * vec3(in.uv, 1.0)).xy; +#endif - let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); + // Fill in the sample bias so we can sample from textures. + var bias: SampleBias; +#ifdef MESHLET_MESH_MATERIAL_PASS + bias.ddx_uv = in.ddx_uv; + bias.ddy_uv = in.ddy_uv; +#else // MESHLET_MESH_MATERIAL_PASS + bias.mip_bias = view.mip_bias; +#endif // MESHLET_MESH_MATERIAL_PASS + + let Nt = pbr_functions::sample_texture( + pbr_bindings::normal_map_texture, + pbr_bindings::normal_map_sampler, + uv, + bias, + ).rgb; + let TBN = pbr_functions::calculate_tbn_mikktspace(normal, in.world_tangent); - // See https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminanceEquation - // Lout = f(v,l) Φ / { 4 π d^2 }⟨n⋅l⟩ - // where - // f(v,l) = (f_d(v,l) + f_r(v,l)) * light_color - // Φ is luminous power in lumens - // our rangeAttenuation = 1 / d^2 multiplied with an attenuation factor for smoothing at the edge of the non-physical maximum light radius + normal = pbr_functions::apply_normal_mapping( + material.flags, + TBN, + double_sided, + is_front, + Nt, + ); - // For a point light, luminous intensity, I, in lumens per steradian is given by: - // I = Φ / 4 π - // The derivation of this can be seen here: https://google.github.io/filament/Filament.html#mjx-eqn-pointLightLuminousPower +#endif // STANDARD_MATERIAL_NORMAL_MAP +#endif // VERTEX_TANGENTS +#endif // VERTEX_UVS - // NOTE: (*light).color.rgb is premultiplied with (*light).intensity / 4 π (which would be the luminous intensity) on the CPU + out.normal = vec4(normal * 0.5 + vec3(0.5), 1.0); + } else { + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); + } +#endif // NORMAL_PREPASS + +#ifdef MOTION_VECTOR_PREPASS +#ifdef MESHLET_MESH_MATERIAL_PASS + out.motion_vector = in.motion_vector; +#else + out.motion_vector = pbr_prepass_functions::calculate_motion_vector(in.world_position, in.previous_world_position); +#endif +#endif - return ((diffuse + specular_light) * (*light).color_inverse_square_range.rgb) * (rangeAttenuation * NoL); + return out; +} +#else +@fragment +fn fragment(in: prepass_io::VertexOutput) { + pbr_prepass_functions::prepass_alpha_discard(in); } +#endif // PREPASS_FRAGMENT -fn spot_light( - world_position: vec3, - light_id: u32, - roughness: f32, - NdotV: f32, - N: vec3, - V: vec3, - R: vec3, - F0: vec3, - f_ab: vec2, - diffuseColor: vec3 -) -> vec3 { - // reuse the point light calculations - let point_light = point_light(world_position, light_id, roughness, NdotV, N, V, R, F0, f_ab, diffuseColor); +``` - let light = &view_bindings::point_lights.data[light_id]; +### bevy_shaders/meshlet_mesh_material - // reconstruct spot dir from x/z and y-direction flag - var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); - spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); - if ((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u { - spot_dir.y = -spot_dir.y; - } - let light_to_frag = (*light).position_radius.xyz - world_position.xyz; +```rust +#import bevy_pbr::{ + meshlet_visibility_buffer_resolve::resolve_vertex_output, + view_transformations::uv_to_ndc, + prepass_io, + pbr_prepass_functions, + utils::rand_f, +} - // calculate attenuation based on 
filament formula https://google.github.io/filament/Filament.html#listing_glslpunctuallight - // spot_scale and spot_offset have been precomputed - // note we normalize here to get "l" from the filament listing. spot_dir is already normalized - let cd = dot(-spot_dir, normalize(light_to_frag)); - let attenuation = saturate(cd * (*light).light_custom_data.z + (*light).light_custom_data.w); - let spot_attenuation = attenuation * attenuation; +@vertex +fn vertex(@builtin(vertex_index) vertex_input: u32) -> @builtin(position) vec4 { + let vertex_index = vertex_input % 3u; + let material_id = vertex_input / 3u; + let material_depth = f32(material_id) / 65535.0; + let uv = vec2(vec2(vertex_index >> 1u, vertex_index & 1u)) * 2.0; + return vec4(uv_to_ndc(uv), material_depth, 1.0); +} - return point_light * spot_attenuation; +@fragment +fn fragment(@builtin(position) frag_coord: vec4) -> @location(0) vec4 { + let vertex_output = resolve_vertex_output(frag_coord); + var rng = vertex_output.cluster_id; + let color = vec3(rand_f(&rng), rand_f(&rng), rand_f(&rng)); + return vec4(color, 1.0); } -fn directional_light(light_id: u32, roughness: f32, NdotV: f32, normal: vec3, view: vec3, R: vec3, F0: vec3, f_ab: vec2, diffuseColor: vec3) -> vec3 { - let light = &view_bindings::lights.directional_lights[light_id]; +#ifdef PREPASS_FRAGMENT +@fragment +fn prepass_fragment(@builtin(position) frag_coord: vec4) -> prepass_io::FragmentOutput { + let vertex_output = resolve_vertex_output(frag_coord); + + var out: prepass_io::FragmentOutput; - let incident_light = (*light).direction_to_light.xyz; +#ifdef NORMAL_PREPASS + out.normal = vec4(vertex_output.world_normal * 0.5 + vec3(0.5), 1.0); +#endif - let half_vector = normalize(incident_light + view); - let NoL = saturate(dot(normal, incident_light)); - let NoH = saturate(dot(normal, half_vector)); - let LoH = saturate(dot(incident_light, half_vector)); +#ifdef MOTION_VECTOR_PREPASS + out.motion_vector = vertex_output.motion_vector; +#endif - let diffuse = diffuseColor * Fd_Burley(roughness, NdotV, NoL, LoH); - let specularIntensity = 1.0; - let specular_light = specular(F0, roughness, half_vector, NdotV, NoL, NoH, LoH, specularIntensity, f_ab); +#ifdef DEFERRED_PREPASS + // There isn't any material info available for this default prepass shader so we are just writing  + // emissive magenta out to the deferred gbuffer to be rendered by the first deferred lighting pass layer. + // This is here so if the default prepass fragment is used for deferred magenta will be rendered, and also + // as an example to show that a user could write to the deferred gbuffer if they were to start from this shader. 
+ out.deferred = vec4(0u, bevy_pbr::rgb9e5::vec3_to_rgb9e5_(vec3(1.0, 0.0, 1.0)), 0u, 0u); + out.deferred_lighting_pass_id = 1u; +#endif - return (specular_light + diffuse) * (*light).color.rgb * NoL; + return out; } +#endif ``` -### crates/bevy_pbr/src/render/wireframe +### bevy_shaders/mesh2d_bindings ```rust -#import bevy_pbr::forward_io::VertexOutput +#define_import_path bevy_sprite::mesh2d_bindings -struct WireframeMaterial { - color: vec4, -}; +#import bevy_sprite::mesh2d_types::Mesh2d -@group(2) @binding(0) -var material: WireframeMaterial; -@fragment -fn fragment(in: VertexOutput) -> @location(0) vec4 { - return material.color; -} +#ifdef PER_OBJECT_BUFFER_BATCH_SIZE +@group(1) @binding(0) var mesh: array; +#else +@group(1) @binding(0) var mesh: array; +#endif // PER_OBJECT_BUFFER_BATCH_SIZE ``` -### crates/bevy_pbr/src/render/mesh_preprocess +### bevy_shaders/mesh_preprocess ```rust // GPU mesh uniform building. @@ -3522,11 +28847,12 @@ fn fragment(in: VertexOutput) -> @location(0) vec4 { #import bevy_pbr::mesh_types::Mesh #import bevy_render::maths +#import bevy_render::view::View // Per-frame data that the CPU supplies to the GPU. struct MeshInput { // The model transform. - model: mat3x4, + world_from_local: mat3x4, // The lightmap UV rect, packed into 64 bits. lightmap_uv_rect: vec2, // Various flags. @@ -3536,15 +28862,45 @@ struct MeshInput { previous_input_index: u32, } +// Information about each mesh instance needed to cull it on GPU. +// +// At the moment, this just consists of its axis-aligned bounding box (AABB). +struct MeshCullingData { + // The 3D center of the AABB in model space, padded with an extra unused + // float value. + aabb_center: vec4, + // The 3D extents of the AABB in model space, divided by two, padded with + // an extra unused float value. + aabb_half_extents: vec4, +} + // One invocation of this compute shader: i.e. one mesh instance in a view. struct PreprocessWorkItem { // The index of the `MeshInput` in the `current_input` buffer that we read // from. input_index: u32, - // The index of the `Mesh` in `output` that we write to. + // In direct mode, the index of the `Mesh` in `output` that we write to. In + // indirect mode, the index of the `IndirectParameters` in + // `indirect_parameters` that we write to. output_index: u32, } +// The `wgpu` indirect parameters structure. This is a union of two structures. +// For more information, see the corresponding comment in +// `gpu_preprocessing.rs`. +struct IndirectParameters { + // `vertex_count` or `index_count`. + data0: u32, + // `instance_count` in both structures. + instance_count: atomic, + // `first_vertex` in both structures. + first_vertex: u32, + // `first_instance` or `base_vertex`. + data1: u32, + // A read-only copy of `instance_index`. + instance_index: u32, +} + // The current frame's `MeshInput`. @group(0) @binding(0) var current_input: array; // The `MeshInput` values from the previous frame. @@ -3556,276 +28912,592 @@ struct PreprocessWorkItem { // The output array of `Mesh`es. @group(0) @binding(3) var output: array; +#ifdef INDIRECT +// The array of indirect parameters for drawcalls. +@group(0) @binding(4) var indirect_parameters: array; +#endif + +#ifdef FRUSTUM_CULLING +// Data needed to cull the meshes. +// +// At the moment, this consists only of AABBs. +@group(0) @binding(5) var mesh_culling_data: array; + +// The view data, including the view matrix. +@group(0) @binding(6) var view: View; + +// Returns true if the view frustum intersects an oriented bounding box (OBB). 
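// In other words, for each of the five tested planes the box is reduced to an interval
// along the plane normal: r = |n . A0| * e0 + |n . A1| * e1 + |n . A2| * e2, where
// A0..A2 are the first three columns of `world_from_local` and e0..e2 are the half
// extents. The transformed AABB center plus that radius is then tested against the
// plane half space, which is exact per plane for a box; the combined frustum test is
// still conservative, so a box passing all five planes can occasionally lie outside.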
+// +// `aabb_center.w` should be 1.0. +fn view_frustum_intersects_obb( + world_from_local: mat4x4, + aabb_center: vec4, + aabb_half_extents: vec3, +) -> bool { + + for (var i = 0; i < 5; i += 1) { + // Calculate relative radius of the sphere associated with this plane. + let plane_normal = view.frustum[i]; + let relative_radius = dot( + abs( + vec3( + dot(plane_normal, world_from_local[0]), + dot(plane_normal, world_from_local[1]), + dot(plane_normal, world_from_local[2]), + ) + ), + aabb_half_extents + ); + + // Check the frustum plane. + if (!maths::sphere_intersects_plane_half_space( + plane_normal, aabb_center, relative_radius)) { + return false; + } + } + + return true; +} +#endif + @compute @workgroup_size(64) fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { + // Figure out our instance index. If this thread doesn't correspond to any + // index, bail. let instance_index = global_invocation_id.x; if (instance_index >= arrayLength(&work_items)) { return; } // Unpack. - let mesh_index = work_items[instance_index].input_index; + let input_index = work_items[instance_index].input_index; let output_index = work_items[instance_index].output_index; - let model_affine_transpose = current_input[mesh_index].model; - let model = maths::affine3_to_square(model_affine_transpose); + let world_from_local_affine_transpose = current_input[input_index].world_from_local; + let world_from_local = maths::affine3_to_square(world_from_local_affine_transpose); + + // Cull if necessary. +#ifdef FRUSTUM_CULLING + let aabb_center = mesh_culling_data[input_index].aabb_center.xyz; + let aabb_half_extents = mesh_culling_data[input_index].aabb_half_extents.xyz; + + // Do an OBB-based frustum cull. + let model_center = world_from_local * vec4(aabb_center, 1.0); + if (!view_frustum_intersects_obb(world_from_local, model_center, aabb_half_extents)) { + return; + } +#endif // Calculate inverse transpose. - let inverse_transpose_model = transpose(maths::inverse_affine3(transpose( - model_affine_transpose))); + let local_from_world_transpose = transpose(maths::inverse_affine3(transpose( + world_from_local_affine_transpose))); // Pack inverse transpose. - let inverse_transpose_model_a = mat2x4( - vec4(inverse_transpose_model[0].xyz, inverse_transpose_model[1].x), - vec4(inverse_transpose_model[1].yz, inverse_transpose_model[2].xy)); - let inverse_transpose_model_b = inverse_transpose_model[2].z; + let local_from_world_transpose_a = mat2x4( + vec4(local_from_world_transpose[0].xyz, local_from_world_transpose[1].x), + vec4(local_from_world_transpose[1].yz, local_from_world_transpose[2].xy)); + let local_from_world_transpose_b = local_from_world_transpose[2].z; // Look up the previous model matrix. - let previous_input_index = current_input[mesh_index].previous_input_index; - var previous_model: mat3x4; + let previous_input_index = current_input[input_index].previous_input_index; + var previous_world_from_local: mat3x4; if (previous_input_index == 0xffffffff) { - previous_model = model_affine_transpose; + previous_world_from_local = world_from_local_affine_transpose; } else { - previous_model = previous_input[previous_input_index].model; + previous_world_from_local = previous_input[previous_input_index].world_from_local; } + // Figure out the output index. In indirect mode, this involves bumping the + // instance index in the indirect parameters structure. Otherwise, this + // index was directly supplied to us. 
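    // Illustrative walkthrough (values not from the original source): if this batch's
    // `instance_index` base slot is 40 and three instances survive culling, the three
    // surviving invocations each atomicAdd on `instance_count`, get back 0, 1 and 2,
    // and therefore write their Mesh data to output slots 40, 41 and 42, while
    // `instance_count` ends up at 3 for the indirect draw call.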
+#ifdef INDIRECT + let mesh_output_index = indirect_parameters[output_index].instance_index + + atomicAdd(&indirect_parameters[output_index].instance_count, 1u); +#else + let mesh_output_index = output_index; +#endif + // Write the output. - output[output_index].model = model_affine_transpose; - output[output_index].previous_model = previous_model; - output[output_index].inverse_transpose_model_a = inverse_transpose_model_a; - output[output_index].inverse_transpose_model_b = inverse_transpose_model_b; - output[output_index].flags = current_input[mesh_index].flags; - output[output_index].lightmap_uv_rect = current_input[mesh_index].lightmap_uv_rect; + output[mesh_output_index].world_from_local = world_from_local_affine_transpose; + output[mesh_output_index].previous_world_from_local = previous_world_from_local; + output[mesh_output_index].local_from_world_transpose_a = local_from_world_transpose_a; + output[mesh_output_index].local_from_world_transpose_b = local_from_world_transpose_b; + output[mesh_output_index].flags = current_input[input_index].flags; + output[mesh_output_index].lightmap_uv_rect = current_input[input_index].lightmap_uv_rect; +} + +``` + +### bevy_shaders/mesh2d_view_bindings + +```rust +#define_import_path bevy_sprite::mesh2d_view_bindings + +#import bevy_render::view::View +#import bevy_render::globals::Globals + +@group(0) @binding(0) var view: View; + +@group(0) @binding(1) var globals: Globals; + +@group(0) @binding(2) var dt_lut_texture: texture_3d; +@group(0) @binding(3) var dt_lut_sampler: sampler; + +``` + +### bevy_shaders/custom_vertex_attribute + +```rust +#import bevy_pbr::mesh_functions::{get_world_from_local, mesh_position_local_to_clip} + +struct CustomMaterial { + color: vec4, +}; +@group(2) @binding(0) var material: CustomMaterial; + +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + @location(1) blend_color: vec4, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) blend_color: vec4, +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + out.clip_position = mesh_position_local_to_clip( + get_world_from_local(vertex.instance_index), + vec4(vertex.position, 1.0), + ); + out.blend_color = vertex.blend_color; + return out; +} + +struct FragmentInput { + @location(0) blend_color: vec4, +}; + +@fragment +fn fragment(input: FragmentInput) -> @location(0) vec4 { + return material.color * input.blend_color; } ``` -### crates/bevy_pbr/src/render/pbr_prepass +### bevy_shaders/ssr ```rust +// A postprocessing pass that performs screen-space reflections. 
+ +#define_import_path bevy_pbr::ssr + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput #import bevy_pbr::{ - pbr_prepass_functions, - pbr_bindings::material, - pbr_types, + lighting, + lighting::{LAYER_BASE, LAYER_CLEARCOAT}, + mesh_view_bindings::{view, depth_prepass_texture, deferred_prepass_texture, ssr_settings}, + pbr_deferred_functions::pbr_input_from_deferred_gbuffer, + pbr_deferred_types, pbr_functions, - prepass_io, - mesh_view_bindings::view, + prepass_utils, + raymarch::{ + depth_ray_march_from_cs, + depth_ray_march_march, + depth_ray_march_new_from_depth, + depth_ray_march_to_ws_dir, + }, + utils, + view_transformations::{ + depth_ndc_to_view_z, + frag_coord_to_ndc, + ndc_to_frag_coord, + ndc_to_uv, + position_view_to_ndc, + position_world_to_ndc, + position_world_to_view, + }, } +#import bevy_render::view::View -#ifdef MESHLET_MESH_MATERIAL_PASS -#import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output +#ifdef ENVIRONMENT_MAP +#import bevy_pbr::environment_map #endif -#ifdef PREPASS_FRAGMENT +// The texture representing the color framebuffer. +@group(1) @binding(0) var color_texture: texture_2d; + +// The sampler that lets us sample from the color framebuffer. +@group(1) @binding(1) var color_sampler: sampler; + +// Group 1, bindings 2 and 3 are in `raymarch.wgsl`. + +// Returns the reflected color in the RGB channel and the specular occlusion in +// the alpha channel. +// +// The general approach here is similar to [1]. We first project the reflection +// ray into screen space. Then we perform uniform steps along that screen-space +// reflected ray, converting each step to view space. +// +// The arguments are: +// +// * `R_world`: The reflection vector in world space. +// +// * `P_world`: The current position in world space. +// +// [1]: https://lettier.github.io/3d-game-shaders-for-beginners/screen-space-reflection.html +fn evaluate_ssr(R_world: vec3, P_world: vec3) -> vec4 { + let depth_size = vec2(textureDimensions(depth_prepass_texture)); + + var raymarch = depth_ray_march_new_from_depth(depth_size); + depth_ray_march_from_cs(&raymarch, position_world_to_ndc(P_world)); + depth_ray_march_to_ws_dir(&raymarch, normalize(R_world)); + raymarch.linear_steps = ssr_settings.linear_steps; + raymarch.bisection_steps = ssr_settings.bisection_steps; + raymarch.use_secant = ssr_settings.use_secant != 0u; + raymarch.depth_thickness_linear_z = ssr_settings.thickness; + raymarch.jitter = 1.0; // Disable jitter for now. + raymarch.march_behind_surfaces = false; + + let raymarch_result = depth_ray_march_march(&raymarch); + if (raymarch_result.hit) { + return vec4( + textureSampleLevel(color_texture, color_sampler, raymarch_result.hit_uv, 0.0).rgb, + 0.0 + ); + } + + return vec4(0.0, 0.0, 0.0, 1.0); +} + @fragment -fn fragment( -#ifdef MESHLET_MESH_MATERIAL_PASS - @builtin(position) frag_coord: vec4, -#else - in: prepass_io::VertexOutput, - @builtin(front_facing) is_front: bool, +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Sample the depth. + var frag_coord = in.position; + frag_coord.z = prepass_utils::prepass_depth(in.position, 0u); + + // Load the G-buffer data. + let fragment = textureLoad(color_texture, vec2(frag_coord.xy), 0); + let gbuffer = textureLoad(deferred_prepass_texture, vec2(frag_coord.xy), 0); + let pbr_input = pbr_input_from_deferred_gbuffer(frag_coord, gbuffer); + + // Don't do anything if the surface is too rough, since we can't blur or do + // temporal accumulation yet. 
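    // The single reflection ray marched below can only reproduce sharp, mirror-like
    // reflections, so for surfaces rougher than `perceptual_roughness_threshold` the
    // previously lit color sampled from the color framebuffer is returned unchanged.
    // (Illustrative value, not taken from this file: a threshold around 0.1 keeps SSR
    // on polished floors and water while skipping matte materials.)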
+ let perceptual_roughness = pbr_input.material.perceptual_roughness; + if (perceptual_roughness > ssr_settings.perceptual_roughness_threshold) { + return fragment; + } + + // Unpack the PBR input. + var specular_occlusion = pbr_input.specular_occlusion; + let world_position = pbr_input.world_position.xyz; + let N = pbr_input.N; + let V = pbr_input.V; + + // Calculate the reflection vector. + let R = reflect(-V, N); + + // Do the raymarching. + let ssr_specular = evaluate_ssr(R, world_position); + var indirect_light = ssr_specular.rgb; + specular_occlusion *= ssr_specular.a; + + // Sample the environment map if necessary. + // + // This will take the specular part of the environment map into account if + // the ray missed. Otherwise, it only takes the diffuse part. + // + // TODO: Merge this with the duplicated code in `apply_pbr_lighting`. +#ifdef ENVIRONMENT_MAP + // Unpack values required for environment mapping. + let base_color = pbr_input.material.base_color.rgb; + let metallic = pbr_input.material.metallic; + let reflectance = pbr_input.material.reflectance; + let specular_transmission = pbr_input.material.specular_transmission; + let diffuse_transmission = pbr_input.material.diffuse_transmission; + let diffuse_occlusion = pbr_input.diffuse_occlusion; + +#ifdef STANDARD_MATERIAL_CLEARCOAT + // Do the above calculations again for the clearcoat layer. Remember that + // the clearcoat can have its own roughness and its own normal. + let clearcoat = pbr_input.material.clearcoat; + let clearcoat_perceptual_roughness = pbr_input.material.clearcoat_perceptual_roughness; + let clearcoat_roughness = lighting::perceptualRoughnessToRoughness(clearcoat_perceptual_roughness); + let clearcoat_N = pbr_input.clearcoat_N; + let clearcoat_NdotV = max(dot(clearcoat_N, pbr_input.V), 0.0001); + let clearcoat_R = reflect(-pbr_input.V, clearcoat_N); +#endif // STANDARD_MATERIAL_CLEARCOAT + + // Calculate various other values needed for environment mapping. + let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); + let diffuse_color = pbr_functions::calculate_diffuse_color( + base_color, + metallic, + specular_transmission, + diffuse_transmission + ); + let NdotV = max(dot(N, V), 0.0001); + let F_ab = lighting::F_AB(perceptual_roughness, NdotV); + let F0 = pbr_functions::calculate_F0(base_color, metallic, reflectance); + + // Pack all the values into a structure. + var lighting_input: lighting::LightingInput; + lighting_input.layers[LAYER_BASE].NdotV = NdotV; + lighting_input.layers[LAYER_BASE].N = N; + lighting_input.layers[LAYER_BASE].R = R; + lighting_input.layers[LAYER_BASE].perceptual_roughness = perceptual_roughness; + lighting_input.layers[LAYER_BASE].roughness = roughness; + lighting_input.P = world_position.xyz; + lighting_input.V = V; + lighting_input.diffuse_color = diffuse_color; + lighting_input.F0_ = F0; + lighting_input.F_ab = F_ab; +#ifdef STANDARD_MATERIAL_CLEARCOAT + lighting_input.layers[LAYER_CLEARCOAT].NdotV = clearcoat_NdotV; + lighting_input.layers[LAYER_CLEARCOAT].N = clearcoat_N; + lighting_input.layers[LAYER_CLEARCOAT].R = clearcoat_R; + lighting_input.layers[LAYER_CLEARCOAT].perceptual_roughness = clearcoat_perceptual_roughness; + lighting_input.layers[LAYER_CLEARCOAT].roughness = clearcoat_roughness; + lighting_input.clearcoat_strength = clearcoat; +#endif // STANDARD_MATERIAL_CLEARCOAT + + // Sample the environment map. + let environment_light = environment_map::environment_map_light(&lighting_input, false); + + // Accumulate the environment map light. 
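    // Note how `specular_occlusion` ties the two specular sources together: evaluate_ssr
    // returned alpha 0.0 on a hit and 1.0 on a miss, and that alpha was multiplied into
    // `specular_occlusion` above. On a hit the raymarched color already sits in
    // `indirect_light`, so the environment map's specular term below is suppressed; on a
    // miss the environment specular is applied at full weight as the fallback.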
+ indirect_light += view.exposure * + (environment_light.diffuse * diffuse_occlusion + + environment_light.specular * specular_occlusion); #endif -) -> prepass_io::FragmentOutput { -#ifdef MESHLET_MESH_MATERIAL_PASS - let in = resolve_vertex_output(frag_coord); - let is_front = true; -#else - pbr_prepass_functions::prepass_alpha_discard(in); + + // Write the results. + return vec4(fragment.rgb + indirect_light, 1.0); +} + +``` + +### bevy_shaders/gtao_utils + +```rust +#define_import_path bevy_pbr::gtao_utils + +#import bevy_render::maths::{PI, HALF_PI} + +// Approximates single-bounce ambient occlusion to multi-bounce ambient occlusion +// https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf#page=78 +fn gtao_multibounce(visibility: f32, base_color: vec3) -> vec3 { + let a = 2.0404 * base_color - 0.3324; + let b = -4.7951 * base_color + 0.6417; + let c = 2.7552 * base_color + 0.6903; + let x = vec3(visibility); + return max(x, ((x * a + b) * x + c) * x); +} + +fn fast_sqrt(x: f32) -> f32 { + return bitcast(0x1fbd1df5 + (bitcast(x) >> 1u)); +} + +fn fast_acos(in_x: f32) -> f32 { + let x = abs(in_x); + var res = -0.156583 * x + HALF_PI; + res *= fast_sqrt(1.0 - x); + return select(PI - res, res, in_x >= 0.0); +} + +``` + +### bevy_shaders/globals + +```rust +#define_import_path bevy_render::globals + +struct Globals { + // The time since startup in seconds + // Wraps to 0 after 1 hour. + time: f32, + // The delta time since the previous frame in seconds + delta_time: f32, + // Frame count since the start of the app. + // It wraps to zero when it reaches the maximum value of a u32. + frame_count: u32, +#ifdef SIXTEEN_BYTE_ALIGNMENT + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: f32 #endif +}; - var out: prepass_io::FragmentOutput; +``` -#ifdef DEPTH_CLAMP_ORTHO - out.frag_depth = in.clip_position_unclamped.z; -#endif // DEPTH_CLAMP_ORTHO +### bevy_shaders/sprite_view_bindings -#ifdef NORMAL_PREPASS - // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit - if (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u { - let double_sided = (material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; +```rust +#define_import_path bevy_sprite::sprite_view_bindings - let world_normal = pbr_functions::prepare_world_normal( - in.world_normal, - double_sided, - is_front, - ); +#import bevy_render::view::View - let normal = pbr_functions::apply_normal_mapping( - material.flags, - world_normal, - double_sided, - is_front, -#ifdef VERTEX_TANGENTS -#ifdef STANDARD_MATERIAL_NORMAL_MAP - in.world_tangent, -#endif // STANDARD_MATERIAL_NORMAL_MAP -#endif // VERTEX_TANGENTS -#ifdef VERTEX_UVS - in.uv, -#endif // VERTEX_UVS - view.mip_bias, -#ifdef MESHLET_MESH_MATERIAL_PASS - in.ddx_uv, - in.ddy_uv, -#endif // MESHLET_MESH_MATERIAL_PASS - ); +@group(0) @binding(0) var view: View; - out.normal = vec4(normal * 0.5 + vec3(0.5), 1.0); - } else { - out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); - } -#endif // NORMAL_PREPASS +@group(0) @binding(1) var dt_lut_texture: texture_3d; +@group(0) @binding(2) var dt_lut_sampler: sampler; -#ifdef MOTION_VECTOR_PREPASS -#ifdef MESHLET_MESH_MATERIAL_PASS - out.motion_vector = in.motion_vector; -#else - out.motion_vector = pbr_prepass_functions::calculate_motion_vector(in.world_position, in.previous_world_position); + +``` + +### bevy_shaders/prepass_io + +```rust +#define_import_path bevy_pbr::prepass_io + +// Most of these attributes are not 
used in the default prepass fragment shader, but they are still needed so we can +// pass them to custom prepass shaders like pbr_prepass.wgsl. +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + +#ifdef VERTEX_UVS_A + @location(1) uv: vec2, +#endif + +#ifdef VERTEX_UVS_B + @location(2) uv_b: vec2, +#endif + +#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS + @location(3) normal: vec3, +#ifdef VERTEX_TANGENTS + @location(4) tangent: vec4, +#endif +#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS + +#ifdef SKINNED + @location(5) joint_indices: vec4, + @location(6) joint_weights: vec4, #endif + +#ifdef VERTEX_COLORS + @location(7) color: vec4, #endif - return out; -} -#else -@fragment -fn fragment(in: prepass_io::VertexOutput) { - pbr_prepass_functions::prepass_alpha_discard(in); +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif // MORPH_TARGETS } -#endif // PREPASS_FRAGMENT - -``` -### crates/bevy_pbr/src/render/parallax_mapping - -```rust -#define_import_path bevy_pbr::parallax_mapping - -#import bevy_pbr::pbr_bindings::{depth_map_texture, depth_map_sampler} +struct VertexOutput { + // This is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, -fn sample_depth_map(uv: vec2) -> f32 { - // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 - // backend (Fxc) panics when using "gradient instructions" inside a loop. - // It results in the whole loop being unrolled by the shader compiler, - // which it can't do because the upper limit of the loop in steep parallax - // mapping is a variable set by the user. - // The "gradient instructions" comes from `textureSample` computing MIP level - // based on UV derivative. With `textureSampleLevel`, we provide ourselves - // the MIP level, so no gradient instructions are used, and we can use - // sample_depth_map in our loop. - // See https://stackoverflow.com/questions/56581141/direct3d11-gradient-instruction-used-in-a-loop-with-varying-iteration-forcing - return textureSampleLevel(depth_map_texture, depth_map_sampler, uv, 0.0).r; -} +#ifdef VERTEX_UVS_A + @location(0) uv: vec2, +#endif -// An implementation of parallax mapping, see https://en.wikipedia.org/wiki/Parallax_mapping -// Code derived from: https://web.archive.org/web/20150419215321/http://sunandblackcat.com/tipFullView.php?l=eng&topicid=28 -fn parallaxed_uv( - depth_scale: f32, - max_layer_count: f32, - max_steps: u32, - // The original interpolated uv - original_uv: vec2, - // The vector from the camera to the fragment at the surface in tangent space - Vt: vec3, -) -> vec2 { - if max_layer_count < 1.0 { - return original_uv; - } - var uv = original_uv; +#ifdef VERTEX_UVS_B + @location(1) uv_b: vec2, +#endif - // Steep Parallax Mapping - // ====================== - // Split the depth map into `layer_count` layers. - // When Vt hits the surface of the mesh (excluding depth displacement), - // if the depth is not below or on surface including depth displacement (textureSample), then - // look forward (+= delta_uv) on depth texture according to - // Vt and distance between hit surface and depth map surface, - // repeat until below the surface. - // - // Where `layer_count` is interpolated between `1.0` and - // `max_layer_count` according to the steepness of Vt. 
+#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS + @location(2) world_normal: vec3, +#ifdef VERTEX_TANGENTS + @location(3) world_tangent: vec4, +#endif +#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS - let view_steepness = abs(Vt.z); - // We mix with minimum value 1.0 because otherwise, - // with 0.0, we get a division by zero in surfaces parallel to viewport, - // resulting in a singularity. - let layer_count = mix(max_layer_count, 1.0, view_steepness); - let layer_depth = 1.0 / layer_count; - var delta_uv = depth_scale * layer_depth * Vt.xy * vec2(1.0, -1.0) / view_steepness; + @location(4) world_position: vec4, +#ifdef MOTION_VECTOR_PREPASS + @location(5) previous_world_position: vec4, +#endif - var current_layer_depth = 0.0; - var texture_depth = sample_depth_map(uv); +#ifdef DEPTH_CLAMP_ORTHO + @location(6) clip_position_unclamped: vec4, +#endif // DEPTH_CLAMP_ORTHO +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + @location(7) instance_index: u32, +#endif - // texture_depth > current_layer_depth means the depth map depth is deeper - // than the depth the ray would be at at this UV offset so the ray has not - // intersected the surface - for (var i: i32 = 0; texture_depth > current_layer_depth && i <= i32(layer_count); i++) { - current_layer_depth += layer_depth; - uv += delta_uv; - texture_depth = sample_depth_map(uv); - } +#ifdef VERTEX_COLORS + @location(8) color: vec4, +#endif +} -#ifdef RELIEF_MAPPING - // Relief Mapping - // ============== - // "Refine" the rough result from Steep Parallax Mapping - // with a **binary search** between the layer selected by steep parallax - // and the next one to find a point closer to the depth map surface. - // This reduces the jaggy step artifacts from steep parallax mapping. +#ifdef PREPASS_FRAGMENT +struct FragmentOutput { +#ifdef NORMAL_PREPASS + @location(0) normal: vec4, +#endif - delta_uv *= 0.5; - var delta_depth = 0.5 * layer_depth; +#ifdef MOTION_VECTOR_PREPASS + @location(1) motion_vector: vec2, +#endif - uv -= delta_uv; - current_layer_depth -= delta_depth; +#ifdef DEFERRED_PREPASS + @location(2) deferred: vec4, + @location(3) deferred_lighting_pass_id: u32, +#endif - for (var i: u32 = 0u; i < max_steps; i++) { - texture_depth = sample_depth_map(uv); +#ifdef DEPTH_CLAMP_ORTHO + @builtin(frag_depth) frag_depth: f32, +#endif // DEPTH_CLAMP_ORTHO +} +#endif //PREPASS_FRAGMENT - // Halve the deltas for the next step - delta_uv *= 0.5; - delta_depth *= 0.5; +``` - // Step based on whether the current depth is above or below the depth map - if (texture_depth > current_layer_depth) { - uv += delta_uv; - current_layer_depth += delta_depth; - } else { - uv -= delta_uv; - current_layer_depth -= delta_depth; - } - } -#else - // Parallax Occlusion mapping - // ========================== - // "Refine" Steep Parallax Mapping by interpolating between the - // previous layer's depth and the computed layer depth. - // Only requires a single lookup, unlike Relief Mapping, but - // may skip small details and result in writhing material artifacts. 
- let previous_uv = uv - delta_uv; - let next_depth = texture_depth - current_layer_depth; - let previous_depth = sample_depth_map(previous_uv) - current_layer_depth + layer_depth; +### bevy_shaders/show_prepass - let weight = next_depth / (next_depth - previous_depth); +```rust +#import bevy_pbr::{ + mesh_view_bindings::globals, + prepass_utils, + forward_io::VertexOutput, +} - uv = mix(uv, previous_uv, weight); +struct ShowPrepassSettings { + show_depth: u32, + show_normals: u32, + show_motion_vectors: u32, + padding_1: u32, + padding_2: u32, +} +@group(2) @binding(0) var settings: ShowPrepassSettings; - current_layer_depth += mix(next_depth, previous_depth, weight); +@fragment +fn fragment( +#ifdef MULTISAMPLED + @builtin(sample_index) sample_index: u32, +#endif + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifndef MULTISAMPLED + let sample_index = 0u; #endif + if settings.show_depth == 1u { + let depth = bevy_pbr::prepass_utils::prepass_depth(mesh.position, sample_index); + return vec4(depth, depth, depth, 1.0); + } else if settings.show_normals == 1u { + let normal = bevy_pbr::prepass_utils::prepass_normal(mesh.position, sample_index); + return vec4(normal, 1.0); + } else if settings.show_motion_vectors == 1u { + let motion_vector = bevy_pbr::prepass_utils::prepass_motion_vector(mesh.position, sample_index); + return vec4(motion_vector / globals.delta_time, 0.0, 1.0); + } - // Note: `current_layer_depth` is not returned, but may be useful - // for light computation later on in future improvements of the pbr shader. - return uv; + return vec4(0.0); } ``` -### crates/bevy_pbr/src/render/pbr_fragment +### bevy_shaders/pbr_fragment ```rust #define_import_path bevy_pbr::pbr_fragment #import bevy_pbr::{ pbr_functions, + pbr_functions::SampleBias, pbr_bindings, pbr_types, prepass_utils, @@ -3863,7 +29535,7 @@ fn pbr_input_from_vertex_output( pbr_input.flags = mesh[in.instance_index].flags; #endif - pbr_input.is_orthographic = view.projection[3].w == 1.0; + pbr_input.is_orthographic = view.clip_from_view[3].w == 1.0; pbr_input.V = pbr_functions::calculate_view(in.world_position, pbr_input.is_orthographic); pbr_input.frag_coord = in.position; pbr_input.world_position = in.world_position; @@ -3903,9 +29575,26 @@ fn pbr_input_from_standard_material( // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" let NdotV = max(dot(pbr_input.N, pbr_input.V), 0.0001); + // Fill in the sample bias so we can sample from textures. + var bias: SampleBias; +#ifdef MESHLET_MESH_MATERIAL_PASS + bias.ddx_uv = in.ddx_uv; + bias.ddy_uv = in.ddy_uv; +#else // MESHLET_MESH_MATERIAL_PASS + bias.mip_bias = view.mip_bias; +#endif // MESHLET_MESH_MATERIAL_PASS + #ifdef VERTEX_UVS let uv_transform = pbr_bindings::material.uv_transform; +#ifdef VERTEX_UVS_A var uv = (uv_transform * vec3(in.uv, 1.0)).xy; +#endif + +#ifdef VERTEX_UVS_B + var uv_b = (uv_transform * vec3(in.uv_b, 1.0)).xy; +#else + var uv_b = uv; +#endif #ifdef VERTEX_TANGENTS if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT) != 0u) { @@ -3915,6 +29604,7 @@ fn pbr_input_from_standard_material( let B = in.world_tangent.w * cross(N, T); // Transform V from fragment to camera in world space to tangent space. let Vt = vec3(dot(V, T), dot(V, B), dot(V, N)); +#ifdef VERTEX_UVS_A uv = parallaxed_uv( pbr_bindings::material.parallax_depth_scale, pbr_bindings::material.max_parallax_layer_count, @@ -3925,15 +29615,36 @@ fn pbr_input_from_standard_material( // about. 
-Vt, ); +#endif + +#ifdef VERTEX_UVS_B + uv_b = parallaxed_uv( + pbr_bindings::material.parallax_depth_scale, + pbr_bindings::material.max_parallax_layer_count, + pbr_bindings::material.max_relief_mapping_search_steps, + uv_b, + // Flip the direction of Vt to go toward the surface to make the + // parallax mapping algorithm easier to understand and reason + // about. + -Vt, + ); +#else + uv_b = uv; +#endif } #endif // VERTEX_TANGENTS if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - pbr_input.material.base_color *= textureSampleGrad(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, in.ddx_uv, in.ddy_uv); + pbr_input.material.base_color *= pbr_functions::sample_texture( + pbr_bindings::base_color_texture, + pbr_bindings::base_color_sampler, +#ifdef STANDARD_MATERIAL_BASE_COLOR_UV_B + uv_b, #else - pbr_input.material.base_color *= textureSampleBias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); + uv, #endif + bias, + ); #ifdef ALPHA_TO_COVERAGE // Sharpen alpha edges. @@ -3962,15 +29673,19 @@ fn pbr_input_from_standard_material( pbr_input.material.alpha_cutoff = pbr_bindings::material.alpha_cutoff; // emissive - // TODO use .a for exposure compensation in HDR var emissive: vec4 = pbr_bindings::material.emissive; #ifdef VERTEX_UVS if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - emissive = vec4(emissive.rgb * textureSampleGrad(pbr_bindings::emissive_texture, pbr_bindings::emissive_sampler, uv, in.ddx_uv, in.ddy_uv).rgb, 1.0); + emissive = vec4(emissive.rgb * pbr_functions::sample_texture( + pbr_bindings::emissive_texture, + pbr_bindings::emissive_sampler, +#ifdef STANDARD_MATERIAL_EMISSIVE_UV_B + uv_b, #else - emissive = vec4(emissive.rgb * textureSampleBias(pbr_bindings::emissive_texture, pbr_bindings::emissive_sampler, uv, view.mip_bias).rgb, 1.0); + uv, #endif + bias, + ).rgb, emissive.a); } #endif pbr_input.material.emissive = emissive; @@ -3981,11 +29696,16 @@ fn pbr_input_from_standard_material( let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); #ifdef VERTEX_UVS if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - let metallic_roughness = textureSampleGrad(pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, uv, in.ddx_uv, in.ddy_uv); + let metallic_roughness = pbr_functions::sample_texture( + pbr_bindings::metallic_roughness_texture, + pbr_bindings::metallic_roughness_sampler, +#ifdef STANDARD_MATERIAL_METALLIC_ROUGHNESS_UV_B + uv_b, #else - let metallic_roughness = textureSampleBias(pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, uv, view.mip_bias); + uv, #endif + bias, + ); // Sampling from GLTF standard channels for now metallic *= metallic_roughness.b; perceptual_roughness *= metallic_roughness.g; @@ -3994,46 +29714,105 @@ fn pbr_input_from_standard_material( pbr_input.material.metallic = metallic; pbr_input.material.perceptual_roughness = perceptual_roughness; + // Clearcoat factor + pbr_input.material.clearcoat = pbr_bindings::material.clearcoat; +#ifdef VERTEX_UVS +#ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_CLEARCOAT_TEXTURE_BIT) != 0u) { + pbr_input.material.clearcoat 
*= pbr_functions::sample_texture( + pbr_bindings::clearcoat_texture, + pbr_bindings::clearcoat_sampler, +#ifdef STANDARD_MATERIAL_CLEARCOAT_UV_B + uv_b, +#else + uv, +#endif + bias, + ).r; + } +#endif // PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED +#endif // VERTEX_UVS + + // Clearcoat roughness + pbr_input.material.clearcoat_perceptual_roughness = pbr_bindings::material.clearcoat_perceptual_roughness; +#ifdef VERTEX_UVS +#ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_CLEARCOAT_ROUGHNESS_TEXTURE_BIT) != 0u) { + pbr_input.material.clearcoat_perceptual_roughness *= pbr_functions::sample_texture( + pbr_bindings::clearcoat_roughness_texture, + pbr_bindings::clearcoat_roughness_sampler, +#ifdef STANDARD_MATERIAL_CLEARCOAT_ROUGHNESS_UV_B + uv_b, +#else + uv, +#endif + bias, + ).g; + } +#endif // PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED +#endif // VERTEX_UVS + var specular_transmission: f32 = pbr_bindings::material.specular_transmission; +#ifdef VERTEX_UVS #ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_SPECULAR_TRANSMISSION_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - specular_transmission *= textureSampleGrad(pbr_bindings::specular_transmission_texture, pbr_bindings::specular_transmission_sampler, uv, in.ddx_uv, in.ddy_uv).r; + specular_transmission *= pbr_functions::sample_texture( + pbr_bindings::specular_transmission_texture, + pbr_bindings::specular_transmission_sampler, +#ifdef STANDARD_MATERIAL_SPECULAR_TRANSMISSION_UV_B + uv_b, #else - specular_transmission *= textureSampleBias(pbr_bindings::specular_transmission_texture, pbr_bindings::specular_transmission_sampler, uv, view.mip_bias).r; + uv, #endif + bias, + ).r; } +#endif #endif pbr_input.material.specular_transmission = specular_transmission; var thickness: f32 = pbr_bindings::material.thickness; +#ifdef VERTEX_UVS #ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_THICKNESS_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - thickness *= textureSampleGrad(pbr_bindings::thickness_texture, pbr_bindings::thickness_sampler, uv, in.ddx_uv, in.ddy_uv).g; + thickness *= pbr_functions::sample_texture( + pbr_bindings::thickness_texture, + pbr_bindings::thickness_sampler, +#ifdef STANDARD_MATERIAL_THICKNESS_UV_B + uv_b, #else - thickness *= textureSampleBias(pbr_bindings::thickness_texture, pbr_bindings::thickness_sampler, uv, view.mip_bias).g; + uv, #endif + bias, + ).g; } +#endif #endif // scale thickness, accounting for non-uniform scaling (e.g. 
a “squished” mesh) // TODO: Meshlet support #ifndef MESHLET_MESH_MATERIAL_PASS thickness *= length( - (transpose(mesh[in.instance_index].model) * vec4(pbr_input.N, 0.0)).xyz + (transpose(mesh[in.instance_index].world_from_local) * vec4(pbr_input.N, 0.0)).xyz ); #endif pbr_input.material.thickness = thickness; var diffuse_transmission = pbr_bindings::material.diffuse_transmission; +#ifdef VERTEX_UVS #ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_DIFFUSE_TRANSMISSION_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - diffuse_transmission *= textureSampleGrad(pbr_bindings::diffuse_transmission_texture, pbr_bindings::diffuse_transmission_sampler, uv, in.ddx_uv, in.ddy_uv).a; + diffuse_transmission *= pbr_functions::sample_texture( + pbr_bindings::diffuse_transmission_texture, + pbr_bindings::diffuse_transmission_sampler, +#ifdef STANDARD_MATERIAL_DIFFUSE_TRANSMISSION_UV_B + uv_b, #else - diffuse_transmission *= textureSampleBias(pbr_bindings::diffuse_transmission_texture, pbr_bindings::diffuse_transmission_sampler, uv, view.mip_bias).a; + uv, #endif + bias, + ).a; } +#endif #endif pbr_input.material.diffuse_transmission = diffuse_transmission; @@ -4041,11 +29820,16 @@ fn pbr_input_from_standard_material( var specular_occlusion: f32 = 1.0; #ifdef VERTEX_UVS if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT) != 0u) { -#ifdef MESHLET_MESH_MATERIAL_PASS - diffuse_occlusion = vec3(textureSampleGrad(pbr_bindings::occlusion_texture, pbr_bindings::occlusion_sampler, uv, in.ddx_uv, in.ddy_uv).r); + diffuse_occlusion *= pbr_functions::sample_texture( + pbr_bindings::occlusion_texture, + pbr_bindings::occlusion_sampler, +#ifdef STANDARD_MATERIAL_OCCLUSION_UV_B + uv_b, #else - diffuse_occlusion = vec3(textureSampleBias(pbr_bindings::occlusion_texture, pbr_bindings::occlusion_sampler, uv, view.mip_bias).r); + uv, #endif + bias, + ).r; } #endif #ifdef SCREEN_SPACE_AMBIENT_OCCLUSION @@ -4061,26 +29845,116 @@ fn pbr_input_from_standard_material( // N (normal vector) #ifndef LOAD_PREPASS_NORMALS + + pbr_input.N = normalize(pbr_input.world_normal); + pbr_input.clearcoat_N = pbr_input.N; + +#ifdef VERTEX_UVS +#ifdef VERTEX_TANGENTS + + let TBN = pbr_functions::calculate_tbn_mikktspace(pbr_input.world_normal, in.world_tangent); + +#ifdef STANDARD_MATERIAL_NORMAL_MAP + + let Nt = pbr_functions::sample_texture( + pbr_bindings::normal_map_texture, + pbr_bindings::normal_map_sampler, +#ifdef STANDARD_MATERIAL_NORMAL_MAP_UV_B + uv_b, +#else + uv, +#endif + bias, + ).rgb; + pbr_input.N = pbr_functions::apply_normal_mapping( pbr_bindings::material.flags, - pbr_input.world_normal, + TBN, double_sided, is_front, -#ifdef VERTEX_TANGENTS -#ifdef STANDARD_MATERIAL_NORMAL_MAP - in.world_tangent, -#endif -#endif -#ifdef VERTEX_UVS - uv, -#endif - view.mip_bias, -#ifdef MESHLET_MESH_MATERIAL_PASS - in.ddx_uv, - in.ddy_uv, -#endif + Nt, ); + +#endif // STANDARD_MATERIAL_NORMAL_MAP + +#ifdef STANDARD_MATERIAL_CLEARCOAT + + // Note: `KHR_materials_clearcoat` specifies that, if there's no + // clearcoat normal map, we must set the normal to the mesh's normal, + // and not to the main layer's bumped normal. 
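+    // `clearcoat_N` was initialized to the mesh's interpolated normal above, so it
+    // is only replaced by a bumped normal inside the
+    // `STANDARD_MATERIAL_CLEARCOAT_NORMAL_MAP` branch below.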
+ +#ifdef STANDARD_MATERIAL_CLEARCOAT_NORMAL_MAP + + let clearcoat_Nt = pbr_functions::sample_texture( + pbr_bindings::clearcoat_normal_texture, + pbr_bindings::clearcoat_normal_sampler, +#ifdef STANDARD_MATERIAL_CLEARCOAT_NORMAL_UV_B + uv_b, +#else + uv, #endif + bias, + ).rgb; + + pbr_input.clearcoat_N = pbr_functions::apply_normal_mapping( + pbr_bindings::material.flags, + TBN, + double_sided, + is_front, + clearcoat_Nt, + ); + +#endif // STANDARD_MATERIAL_CLEARCOAT_NORMAL_MAP + +#endif // STANDARD_MATERIAL_CLEARCOAT + +#endif // VERTEX_TANGENTS +#endif // VERTEX_UVS + + // Take anisotropy into account. + // + // This code comes from the `KHR_materials_anisotropy` spec: + // +#ifdef PBR_ANISOTROPY_TEXTURE_SUPPORTED +#ifdef VERTEX_TANGENTS +#ifdef STANDARD_MATERIAL_ANISOTROPY + + var anisotropy_strength = pbr_bindings::material.anisotropy_strength; + var anisotropy_direction = pbr_bindings::material.anisotropy_rotation; + + // Adjust based on the anisotropy map if there is one. + if ((pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ANISOTROPY_TEXTURE_BIT) != 0u) { + let anisotropy_texel = pbr_functions::sample_texture( + pbr_bindings::anisotropy_texture, + pbr_bindings::anisotropy_sampler, +#ifdef STANDARD_MATERIAL_ANISOTROPY_UV_B + uv_b, +#else // STANDARD_MATERIAL_ANISOTROPY_UV_B + uv, +#endif // STANDARD_MATERIAL_ANISOTROPY_UV_B + bias, + ).rgb; + + let anisotropy_direction_from_texture = normalize(anisotropy_texel.rg * 2.0 - 1.0); + // Rotate by the anisotropy direction. + anisotropy_direction = + mat2x2(anisotropy_direction.xy, anisotropy_direction.yx * vec2(-1.0, 1.0)) * + anisotropy_direction_from_texture; + anisotropy_strength *= anisotropy_texel.b; + } + + pbr_input.anisotropy_strength = anisotropy_strength; + + let anisotropy_T = normalize(TBN * vec3(anisotropy_direction, 0.0)); + let anisotropy_B = normalize(cross(pbr_input.world_normal, anisotropy_T)); + pbr_input.anisotropy_T = anisotropy_T; + pbr_input.anisotropy_B = anisotropy_B; + +#endif // STANDARD_MATERIAL_ANISOTROPY +#endif // VERTEX_TANGENTS +#endif // PBR_ANISOTROPY_TEXTURE_SUPPORTED + +#endif // LOAD_PREPASS_NORMALS // TODO: Meshlet support #ifdef LIGHTMAP @@ -4096,5158 +29970,5898 @@ fn pbr_input_from_standard_material( ``` -### crates/bevy_pbr/src/render/utils +### bevy_shaders/spatial_denoise ```rust -#define_import_path bevy_pbr::utils +// 3x3 bilaterial filter (edge-preserving blur) +// https://people.csail.mit.edu/sparis/bf_course/course_notes.pdf -#import bevy_pbr::rgb9e5 +// Note: Does not use the Gaussian kernel part of a typical bilateral blur +// From the paper: "use the information gathered on a neighborhood of 4 × 4 using a bilateral filter for +// reconstruction, using _uniform_ convolution weights" -const PI: f32 = 3.141592653589793; -const HALF_PI: f32 = 1.57079632679; -const E: f32 = 2.718281828459045; +// Note: The paper does a 4x4 (not quite centered) filter, offset by +/- 1 pixel every other frame +// XeGTAO does a 3x3 filter, on two pixels at a time per compute thread, applied twice +// We do a 3x3 filter, on 1 pixel per compute thread, applied once -fn hsv2rgb(hue: f32, saturation: f32, value: f32) -> vec3 { - let rgb = clamp( - abs( - ((hue * 6.0 + vec3(0.0, 4.0, 2.0)) % 6.0) - 3.0 - ) - 1.0, - vec3(0.0), - vec3(1.0) - ); +#import bevy_render::view::View - return value * mix(vec3(1.0), rgb, vec3(saturation)); -} +@group(0) @binding(0) var ambient_occlusion_noisy: texture_2d; +@group(0) @binding(1) var depth_differences: texture_2d; +@group(0) @binding(2) var 
ambient_occlusion: texture_storage_2d; +@group(1) @binding(0) var point_clamp_sampler: sampler; +@group(1) @binding(1) var view: View; -// Generates a random u32 in range [0, u32::MAX]. -// -// `state` is a mutable reference to a u32 used as the seed. -// -// Values are generated via "white noise", with no correlation between values. -// In shaders, you often want spatial and/or temporal correlation. Use a different RNG method for these use cases. -// -// https://www.pcg-random.org -// https://www.reedbeta.com/blog/hash-functions-for-gpu-rendering -fn rand_u(state: ptr) -> u32 { - *state = *state * 747796405u + 2891336453u; - let word = ((*state >> ((*state >> 28u) + 4u)) ^ *state) * 277803737u; - return (word >> 22u) ^ word; -} +@compute +@workgroup_size(8, 8, 1) +fn spatial_denoise(@builtin(global_invocation_id) global_id: vec3) { + let pixel_coordinates = vec2(global_id.xy); + let uv = vec2(pixel_coordinates) / view.viewport.zw; -// Generates a random f32 in range [0, 1.0]. -fn rand_f(state: ptr) -> f32 { - *state = *state * 747796405u + 2891336453u; - let word = ((*state >> ((*state >> 28u) + 4u)) ^ *state) * 277803737u; - return f32((word >> 22u) ^ word) * bitcast(0x2f800004u); -} + let edges0 = textureGather(0, depth_differences, point_clamp_sampler, uv); + let edges1 = textureGather(0, depth_differences, point_clamp_sampler, uv, vec2(2i, 0i)); + let edges2 = textureGather(0, depth_differences, point_clamp_sampler, uv, vec2(1i, 2i)); + let visibility0 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv); + let visibility1 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(2i, 0i)); + let visibility2 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(0i, 2i)); + let visibility3 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(2i, 2i)); -// Generates a random vec2 where each value is in range [0, 1.0]. -fn rand_vec2f(state: ptr) -> vec2 { - return vec2(rand_f(state), rand_f(state)); -} + let left_edges = unpack4x8unorm(edges0.x); + let right_edges = unpack4x8unorm(edges1.x); + let top_edges = unpack4x8unorm(edges0.z); + let bottom_edges = unpack4x8unorm(edges2.w); + var center_edges = unpack4x8unorm(edges0.y); + center_edges *= vec4(left_edges.y, right_edges.x, top_edges.w, bottom_edges.z); -// Generates a random u32 in range [0, n). -fn rand_range_u(n: u32, state: ptr) -> u32 { - return rand_u(state) % n; -} + let center_weight = 1.2; + let left_weight = center_edges.x; + let right_weight = center_edges.y; + let top_weight = center_edges.z; + let bottom_weight = center_edges.w; + let top_left_weight = 0.425 * (top_weight * top_edges.x + left_weight * left_edges.z); + let top_right_weight = 0.425 * (top_weight * top_edges.y + right_weight * right_edges.z); + let bottom_left_weight = 0.425 * (bottom_weight * bottom_edges.x + left_weight * left_edges.w); + let bottom_right_weight = 0.425 * (bottom_weight * bottom_edges.y + right_weight * right_edges.w); -// returns the (0-1, 0-1) position within the given viewport for the current buffer coords . -// buffer coords can be obtained from `@builtin(position).xy`. -// the view uniform struct contains the current camera viewport in `view.viewport`. 
-// topleft = 0,0 -fn coords_to_viewport_uv(position: vec2, viewport: vec4) -> vec2 { - return (position - viewport.xy) / viewport.zw; -} + let center_visibility = visibility0.y; + let left_visibility = visibility0.x; + let right_visibility = visibility0.z; + let top_visibility = visibility1.x; + let bottom_visibility = visibility2.z; + let top_left_visibility = visibility0.w; + let top_right_visibility = visibility1.w; + let bottom_left_visibility = visibility2.w; + let bottom_right_visibility = visibility3.w; -// https://jcgt.org/published/0003/02/01/paper.pdf + var sum = center_visibility; + sum += left_visibility * left_weight; + sum += right_visibility * right_weight; + sum += top_visibility * top_weight; + sum += bottom_visibility * bottom_weight; + sum += top_left_visibility * top_left_weight; + sum += top_right_visibility * top_right_weight; + sum += bottom_left_visibility * bottom_left_weight; + sum += bottom_right_visibility * bottom_right_weight; -// For encoding normals or unit direction vectors as octahedral coordinates. -fn octahedral_encode(v: vec3) -> vec2 { - var n = v / (abs(v.x) + abs(v.y) + abs(v.z)); - let octahedral_wrap = (1.0 - abs(n.yx)) * select(vec2(-1.0), vec2(1.0), n.xy > vec2f(0.0)); - let n_xy = select(octahedral_wrap, n.xy, n.z >= 0.0); - return n_xy * 0.5 + 0.5; -} + var sum_weight = center_weight; + sum_weight += left_weight; + sum_weight += right_weight; + sum_weight += top_weight; + sum_weight += bottom_weight; + sum_weight += top_left_weight; + sum_weight += top_right_weight; + sum_weight += bottom_left_weight; + sum_weight += bottom_right_weight; -// For decoding normals or unit direction vectors from octahedral coordinates. -fn octahedral_decode(v: vec2) -> vec3 { - let f = v * 2.0 - 1.0; - var n = vec3(f.xy, 1.0 - abs(f.x) - abs(f.y)); - let t = saturate(-n.z); - let w = select(vec2(t), vec2(-t), n.xy >= vec2(0.0)); - n = vec3(n.xy + w, n.z); - return normalize(n); -} + let denoised_visibility = sum / sum_weight; -// https://blog.demofox.org/2022/01/01/interleaved-gradient-noise-a-different-kind-of-low-discrepancy-sequence -fn interleaved_gradient_noise(pixel_coordinates: vec2, frame: u32) -> f32 { - let xy = pixel_coordinates + 5.588238 * f32(frame % 64u); - return fract(52.9829189 * fract(0.06711056 * xy.x + 0.00583715 * xy.y)); + textureStore(ambient_occlusion, pixel_coordinates, vec4(denoised_visibility, 0.0, 0.0, 0.0)); } -// https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) -// TODO: Use an array here instead of a bunch of constants, once arrays work properly under DX12. 
-// NOTE: The names have a final underscore to avoid the following error: -// `Composable module identifiers must not require substitution according to naga writeback rules` -const SPIRAL_OFFSET_0_ = vec2(-0.7071, 0.7071); -const SPIRAL_OFFSET_1_ = vec2(-0.0000, -0.8750); -const SPIRAL_OFFSET_2_ = vec2( 0.5303, 0.5303); -const SPIRAL_OFFSET_3_ = vec2(-0.6250, -0.0000); -const SPIRAL_OFFSET_4_ = vec2( 0.3536, -0.3536); -const SPIRAL_OFFSET_5_ = vec2(-0.0000, 0.3750); -const SPIRAL_OFFSET_6_ = vec2(-0.1768, -0.1768); -const SPIRAL_OFFSET_7_ = vec2( 0.1250, 0.0000); - -``` - -### crates/bevy_pbr/src/render/mesh_types - -```rust -#define_import_path bevy_pbr::mesh_types - -struct Mesh { - // Affine 4x3 matrices transposed to 3x4 - // Use bevy_render::maths::affine3_to_square to unpack - model: mat3x4, - previous_model: mat3x4, - // 3x3 matrix packed in mat2x4 and f32 as: - // [0].xyz, [1].x, - // [1].yz, [2].xy - // [2].z - // Use bevy_pbr::mesh_functions::mat2x4_f32_to_mat3x3_unpack to unpack - inverse_transpose_model_a: mat2x4, - inverse_transpose_model_b: f32, - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. - flags: u32, - lightmap_uv_rect: vec2, -}; - -#ifdef SKINNED -struct SkinnedMesh { - data: array, 256u>, -}; -#endif - -#ifdef MORPH_TARGETS -struct MorphWeights { - weights: array, 16u>, // 16 = 64 / 4 (64 = MAX_MORPH_WEIGHTS) -}; -#endif - -const MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 1u; -const MESH_FLAGS_TRANSMITTED_SHADOW_RECEIVER_BIT: u32 = 2u; -// 2^31 - if the flag is set, the sign is positive, else it is negative -const MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT: u32 = 2147483648u; - ``` -### crates/bevy_pbr/src/render/shadows +### bevy_shaders/water_material ```rust -#define_import_path bevy_pbr::shadows +// A shader that creates water ripples by overlaying 4 normal maps on top of one +// another. +// +// This is used in the `ssr` example. It only supports deferred rendering. #import bevy_pbr::{ - mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, - mesh_view_bindings as view_bindings, - utils::hsv2rgb, - shadow_sampling::{SPOT_SHADOW_TEXEL_SIZE, sample_shadow_cubemap, sample_shadow_map} -} - -const flip_z: vec3 = vec3(1.0, 1.0, -1.0); - -fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = &view_bindings::point_lights.data[light_id]; - - // because the shadow maps align with the axes and the frustum planes are at 45 degrees - // we can get the worldspace depth by taking the largest absolute axis - let surface_to_light = (*light).position_radius.xyz - frag_position.xyz; - let surface_to_light_abs = abs(surface_to_light); - let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); - - // The normal bias here is already scaled by the texel size at 1 world unit from the light. - // The texel size increases proportionally with distance from the light so multiplying by - // distance to light scales the normal bias to the texel size at the fragment distance. 
- let normal_offset = (*light).shadow_normal_bias * distance_to_light * surface_normal.xyz; - let depth_offset = (*light).shadow_depth_bias * normalize(surface_to_light.xyz); - let offset_position = frag_position.xyz + normal_offset + depth_offset; - - // similar largest-absolute-axis trick as above, but now with the offset fragment position - let frag_ls = offset_position.xyz - (*light).position_radius.xyz ; - let abs_position_ls = abs(frag_ls); - let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); - - // NOTE: These simplifications come from multiplying: - // projection * vec4(0, 0, -major_axis_magnitude, 1.0) - // and keeping only the terms that have any impact on the depth. - // Projection-agnostic approach: - let zw = -major_axis_magnitude * (*light).light_custom_data.xy + (*light).light_custom_data.zw; - let depth = zw.x / zw.y; + pbr_deferred_functions::deferred_output, + pbr_fragment::pbr_input_from_standard_material, + prepass_io::{VertexOutput, FragmentOutput}, +} +#import bevy_render::globals::Globals - // Do the lookup, using HW PCF and comparison. Cubemaps assume a left-handed coordinate space, - // so we have to flip the z-axis when sampling. - return sample_shadow_cubemap(frag_ls * flip_z, distance_to_light, depth, light_id); +// Parameters to the water shader. +struct WaterSettings { + // How much to displace each octave each frame, in the u and v directions. + // Two octaves are packed into each `vec4`. + octave_vectors: array, 2>, + // How wide the waves are in each octave. + octave_scales: vec4, + // How high the waves are in each octave. + octave_strengths: vec4, } -fn fetch_spot_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = &view_bindings::point_lights.data[light_id]; +@group(0) @binding(1) var globals: Globals; - let surface_to_light = (*light).position_radius.xyz - frag_position.xyz; +@group(2) @binding(100) var water_normals_texture: texture_2d; +@group(2) @binding(101) var water_normals_sampler: sampler; +@group(2) @binding(102) var water_settings: WaterSettings; - // construct the light view matrix - var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); - // reconstruct spot dir from x/z and y-direction flag - spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); - if (((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) { - spot_dir.y = -spot_dir.y; - } +// Samples a single octave of noise and returns the resulting normal. +fn sample_noise_octave(uv: vec2, strength: f32) -> vec3 { + let N = textureSample(water_normals_texture, water_normals_sampler, uv).rbg * 2.0 - 1.0; + // This isn't slerp, but it's good enough. + return normalize(mix(vec3(0.0, 1.0, 0.0), N, strength)); +} - // view matrix z_axis is the reverse of transform.forward() - let fwd = -spot_dir; - let distance_to_light = dot(fwd, surface_to_light); - let offset_position = - -surface_to_light - + ((*light).shadow_depth_bias * normalize(surface_to_light)) - + (surface_normal.xyz * (*light).shadow_normal_bias) * distance_to_light; +// Samples all four octaves of noise and returns the resulting normal. 
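+// Each octave's UVs are scaled by the corresponding `octave_scales` entry and
+// scrolled over time along its `octave_vectors` direction; the four octave
+// normals are then summed and renormalized.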
+fn sample_noise(uv: vec2, time: f32) -> vec3 { + let uv0 = uv * water_settings.octave_scales[0] + water_settings.octave_vectors[0].xy * time; + let uv1 = uv * water_settings.octave_scales[1] + water_settings.octave_vectors[0].zw * time; + let uv2 = uv * water_settings.octave_scales[2] + water_settings.octave_vectors[1].xy * time; + let uv3 = uv * water_settings.octave_scales[3] + water_settings.octave_vectors[1].zw * time; + return normalize( + sample_noise_octave(uv0, water_settings.octave_strengths[0]) + + sample_noise_octave(uv1, water_settings.octave_strengths[1]) + + sample_noise_octave(uv2, water_settings.octave_strengths[2]) + + sample_noise_octave(uv3, water_settings.octave_strengths[3]) + ); +} - // the construction of the up and right vectors needs to precisely mirror the code - // in render/light.rs:spot_light_view_matrix - var sign = -1.0; - if (fwd.z >= 0.0) { - sign = 1.0; - } - let a = -1.0 / (fwd.z + sign); - let b = fwd.x * fwd.y * a; - let up_dir = vec3(1.0 + sign * fwd.x * fwd.x * a, sign * b, -sign * fwd.x); - let right_dir = vec3(-b, -sign - fwd.y * fwd.y * a, fwd.y); - let light_inv_rot = mat3x3(right_dir, up_dir, fwd); +@fragment +fn fragment(in: VertexOutput, @builtin(front_facing) is_front: bool) -> FragmentOutput { + // Create the PBR input. + var pbr_input = pbr_input_from_standard_material(in, is_front); + // Bump the normal. + pbr_input.N = sample_noise(in.uv, globals.time); + // Send the rest to the deferred shader. + return deferred_output(in, pbr_input); +} - // because the matrix is a pure rotation matrix, the inverse is just the transpose, and to calculate - // the product of the transpose with a vector we can just post-multiply instead of pre-multiplying. - // this allows us to keep the matrix construction code identical between CPU and GPU. 
- let projected_position = offset_position * light_inv_rot; +``` - // divide xy by perspective matrix "f" and by -projected.z (projected.z is -projection matrix's w) - // to get ndc coordinates - let f_div_minus_z = 1.0 / ((*light).spot_light_tan_angle * -projected_position.z); - let shadow_xy_ndc = projected_position.xy * f_div_minus_z; - // convert to uv coordinates - let shadow_uv = shadow_xy_ndc * vec2(0.5, -0.5) + vec2(0.5, 0.5); +### bevy_shaders/visibility_buffer_resolve - // 0.1 must match POINT_LIGHT_NEAR_Z - let depth = 0.1 / -projected_position.z; +```rust +#define_import_path bevy_pbr::meshlet_visibility_buffer_resolve - return sample_shadow_map( - shadow_uv, - depth, - i32(light_id) + view_bindings::lights.spot_light_shadowmap_offset, - SPOT_SHADOW_TEXEL_SIZE - ); +#import bevy_pbr::{ + meshlet_bindings::{ + meshlet_visibility_buffer, + meshlet_cluster_meshlet_ids, + meshlets, + meshlet_vertex_ids, + meshlet_vertex_data, + meshlet_cluster_instance_ids, + meshlet_instance_uniforms, + get_meshlet_index, + unpack_meshlet_vertex, + }, + mesh_view_bindings::view, + mesh_functions::{mesh_position_local_to_world, sign_determinant_model_3x3m}, + mesh_types::{Mesh, MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT}, + view_transformations::{position_world_to_clip, frag_coord_to_ndc}, } +#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} -fn get_cascade_index(light_id: u32, view_z: f32) -> u32 { - let light = &view_bindings::lights.directional_lights[light_id]; - - for (var i: u32 = 0u; i < (*light).num_cascades; i = i + 1u) { - if (-view_z < (*light).cascades[i].far_bound) { - return i; - } - } - return (*light).num_cascades; +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS +#import bevy_pbr::{ + prepass_bindings::previous_view_uniforms, + pbr_prepass_functions::calculate_motion_vector, } +#endif +#endif -fn sample_directional_cascade(light_id: u32, cascade_index: u32, frag_position: vec4, surface_normal: vec3) -> f32 { - let light = &view_bindings::lights.directional_lights[light_id]; - let cascade = &(*light).cascades[cascade_index]; +/// Functions to be used by materials for reading from a meshlet visibility buffer texture. - // The normal bias is scaled to the texel size. 
- let normal_offset = (*light).shadow_normal_bias * (*cascade).texel_size * surface_normal.xyz; - let depth_offset = (*light).shadow_depth_bias * (*light).direction_to_light.xyz; - let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); +#ifdef MESHLET_MESH_MATERIAL_PASS +struct PartialDerivatives { + barycentrics: vec3, + ddx: vec3, + ddy: vec3, +} - let offset_position_clip = (*cascade).view_projection * offset_position; - if (offset_position_clip.w <= 0.0) { - return 1.0; - } - let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; - // No shadow outside the orthographic projection volume - if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 - || any(offset_position_ndc > vec3(1.0))) { - return 1.0; - } +// https://github.com/ConfettiFX/The-Forge/blob/2d453f376ef278f66f97cbaf36c0d12e4361e275/Examples_3/Visibility_Buffer/src/Shaders/FSL/visibilityBuffer_shade.frag.fsl#L83-L139 +fn compute_partial_derivatives(vertex_clip_positions: array, 3>, ndc_uv: vec2, screen_size: vec2) -> PartialDerivatives { + var result: PartialDerivatives; - // compute texture coordinates for shadow lookup, compensating for the Y-flip difference - // between the NDC and texture coordinates - let flip_correction = vec2(0.5, -0.5); - let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); + let inv_w = 1.0 / vec3(vertex_clip_positions[0].w, vertex_clip_positions[1].w, vertex_clip_positions[2].w); + let ndc_0 = vertex_clip_positions[0].xy * inv_w[0]; + let ndc_1 = vertex_clip_positions[1].xy * inv_w[1]; + let ndc_2 = vertex_clip_positions[2].xy * inv_w[2]; - let depth = offset_position_ndc.z; + let inv_det = 1.0 / determinant(mat2x2(ndc_2 - ndc_1, ndc_0 - ndc_1)); + result.ddx = vec3(ndc_1.y - ndc_2.y, ndc_2.y - ndc_0.y, ndc_0.y - ndc_1.y) * inv_det * inv_w; + result.ddy = vec3(ndc_2.x - ndc_1.x, ndc_0.x - ndc_2.x, ndc_1.x - ndc_0.x) * inv_det * inv_w; - let array_index = i32((*light).depth_texture_base_index + cascade_index); - return sample_shadow_map(light_local, depth, array_index, (*cascade).texel_size); -} + var ddx_sum = dot(result.ddx, vec3(1.0)); + var ddy_sum = dot(result.ddy, vec3(1.0)); -fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3, view_z: f32) -> f32 { - let light = &view_bindings::lights.directional_lights[light_id]; - let cascade_index = get_cascade_index(light_id, view_z); + let delta_v = ndc_uv - ndc_0; + let interp_inv_w = inv_w.x + delta_v.x * ddx_sum + delta_v.y * ddy_sum; + let interp_w = 1.0 / interp_inv_w; - if (cascade_index >= (*light).num_cascades) { - return 1.0; - } + result.barycentrics = vec3( + interp_w * (delta_v.x * result.ddx.x + delta_v.y * result.ddy.x + inv_w.x), + interp_w * (delta_v.x * result.ddx.y + delta_v.y * result.ddy.y), + interp_w * (delta_v.x * result.ddx.z + delta_v.y * result.ddy.z), + ); - var shadow = sample_directional_cascade(light_id, cascade_index, frag_position, surface_normal); + result.ddx *= 2.0 / screen_size.x; + result.ddy *= 2.0 / screen_size.y; + ddx_sum *= 2.0 / screen_size.x; + ddy_sum *= 2.0 / screen_size.y; - // Blend with the next cascade, if there is one. 
- let next_cascade_index = cascade_index + 1u; - if (next_cascade_index < (*light).num_cascades) { - let this_far_bound = (*light).cascades[cascade_index].far_bound; - let next_near_bound = (1.0 - (*light).cascades_overlap_proportion) * this_far_bound; - if (-view_z >= next_near_bound) { - let next_shadow = sample_directional_cascade(light_id, next_cascade_index, frag_position, surface_normal); - shadow = mix(shadow, next_shadow, (-view_z - next_near_bound) / (this_far_bound - next_near_bound)); - } - } - return shadow; + let interp_ddx_w = 1.0 / (interp_inv_w + ddx_sum); + let interp_ddy_w = 1.0 / (interp_inv_w + ddy_sum); + + result.ddx = interp_ddx_w * (result.barycentrics * interp_inv_w + result.ddx) - result.barycentrics; + result.ddy = interp_ddy_w * (result.barycentrics * interp_inv_w + result.ddy) - result.barycentrics; + return result; } -fn cascade_debug_visualization( - output_color: vec3, - light_id: u32, - view_z: f32, -) -> vec3 { - let overlay_alpha = 0.95; - let cascade_index = get_cascade_index(light_id, view_z); - let cascade_color = hsv2rgb(f32(cascade_index) / f32(#{MAX_CASCADES_PER_LIGHT}u + 1u), 1.0, 0.5); - return vec3( - (1.0 - overlay_alpha) * output_color.rgb + overlay_alpha * cascade_color - ); +struct VertexOutput { + position: vec4, + world_position: vec4, + world_normal: vec3, + uv: vec2, + ddx_uv: vec2, + ddy_uv: vec2, + world_tangent: vec4, + mesh_flags: u32, + cluster_id: u32, +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS + motion_vector: vec2, +#endif +#endif } -``` +/// Load the visibility buffer texture and resolve it into a VertexOutput. +fn resolve_vertex_output(frag_coord: vec4) -> VertexOutput { + let packed_ids = textureLoad(meshlet_visibility_buffer, vec2(frag_coord.xy), 0).r; + let cluster_id = packed_ids >> 6u; + let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; + let meshlet = meshlets[meshlet_id]; -### crates/bevy_pbr/src/render/fog + let triangle_id = extractBits(packed_ids, 0u, 6u); + let index_ids = meshlet.start_index_id + vec3(triangle_id * 3u) + vec3(0u, 1u, 2u); + let indices = meshlet.start_vertex_id + vec3(get_meshlet_index(index_ids.x), get_meshlet_index(index_ids.y), get_meshlet_index(index_ids.z)); + let vertex_ids = vec3(meshlet_vertex_ids[indices.x], meshlet_vertex_ids[indices.y], meshlet_vertex_ids[indices.z]); + let vertex_1 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.x]); + let vertex_2 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.y]); + let vertex_3 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.z]); -```rust -#define_import_path bevy_pbr::fog + let instance_id = meshlet_cluster_instance_ids[cluster_id]; + var instance_uniform = meshlet_instance_uniforms[instance_id]; -#import bevy_pbr::{ - mesh_view_bindings::fog, - mesh_view_types::Fog, -} + let world_from_local = affine3_to_square(instance_uniform.world_from_local); + let world_position_1 = mesh_position_local_to_world(world_from_local, vec4(vertex_1.position, 1.0)); + let world_position_2 = mesh_position_local_to_world(world_from_local, vec4(vertex_2.position, 1.0)); + let world_position_3 = mesh_position_local_to_world(world_from_local, vec4(vertex_3.position, 1.0)); -// Fog formulas adapted from: -// https://learn.microsoft.com/en-us/windows/win32/direct3d9/fog-formulas -// https://catlikecoding.com/unity/tutorials/rendering/part-14/ -// https://iquilezles.org/articles/fog/ (Atmospheric Fog and Scattering) + let clip_position_1 = position_world_to_clip(world_position_1.xyz); + let clip_position_2 = 
position_world_to_clip(world_position_2.xyz); + let clip_position_3 = position_world_to_clip(world_position_3.xyz); + let frag_coord_ndc = frag_coord_to_ndc(frag_coord).xy; + let partial_derivatives = compute_partial_derivatives( + array(clip_position_1, clip_position_2, clip_position_3), + frag_coord_ndc, + view.viewport.zw, + ); -fn scattering_adjusted_fog_color( - fog_params: Fog, - scattering: vec3, -) -> vec4 { - if (fog_params.directional_light_color.a > 0.0) { - return vec4( - fog_params.base_color.rgb - + scattering * fog_params.directional_light_color.rgb * fog_params.directional_light_color.a, - fog_params.base_color.a, - ); - } else { - return fog_params.base_color; - } -} + let world_position = mat3x4(world_position_1, world_position_2, world_position_3) * partial_derivatives.barycentrics; + let world_normal = mat3x3( + normal_local_to_world(vertex_1.normal, &instance_uniform), + normal_local_to_world(vertex_2.normal, &instance_uniform), + normal_local_to_world(vertex_3.normal, &instance_uniform), + ) * partial_derivatives.barycentrics; + let uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.barycentrics; + let ddx_uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.ddx; + let ddy_uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.ddy; + let world_tangent = mat3x4( + tangent_local_to_world(vertex_1.tangent, world_from_local, instance_uniform.flags), + tangent_local_to_world(vertex_2.tangent, world_from_local, instance_uniform.flags), + tangent_local_to_world(vertex_3.tangent, world_from_local, instance_uniform.flags), + ) * partial_derivatives.barycentrics; -fn linear_fog( - fog_params: Fog, - input_color: vec4, - distance: f32, - scattering: vec3, -) -> vec4 { - var fog_color = scattering_adjusted_fog_color(fog_params, scattering); - let start = fog_params.be.x; - let end = fog_params.be.y; - fog_color.a *= 1.0 - clamp((end - distance) / (end - start), 0.0, 1.0); - return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); -} +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS + let previous_world_from_local = affine3_to_square(instance_uniform.previous_world_from_local); + let previous_world_position_1 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_1.position, 1.0)); + let previous_world_position_2 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_2.position, 1.0)); + let previous_world_position_3 = mesh_position_local_to_world(previous_world_from_local, vec4(vertex_3.position, 1.0)); + let previous_world_position = mat3x4(previous_world_position_1, previous_world_position_2, previous_world_position_3) * partial_derivatives.barycentrics; + let motion_vector = calculate_motion_vector(world_position, previous_world_position); +#endif +#endif -fn exponential_fog( - fog_params: Fog, - input_color: vec4, - distance: f32, - scattering: vec3, -) -> vec4 { - var fog_color = scattering_adjusted_fog_color(fog_params, scattering); - let density = fog_params.be.x; - fog_color.a *= 1.0 - 1.0 / exp(distance * density); - return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); + return VertexOutput( + frag_coord, + world_position, + world_normal, + uv, + ddx_uv, + ddy_uv, + world_tangent, + instance_uniform.flags, + cluster_id, +#ifdef PREPASS_FRAGMENT +#ifdef MOTION_VECTOR_PREPASS + motion_vector, +#endif +#endif + ); } -fn exponential_squared_fog( - fog_params: Fog, - input_color: vec4, - distance: f32, - scattering: vec3, -) -> vec4 { - 
var fog_color = scattering_adjusted_fog_color(fog_params, scattering); - let distance_times_density = distance * fog_params.be.x; - fog_color.a *= 1.0 - 1.0 / exp(distance_times_density * distance_times_density); - return vec4(mix(input_color.rgb, fog_color.rgb, fog_color.a), input_color.a); +fn normal_local_to_world(vertex_normal: vec3, instance_uniform: ptr) -> vec3 { + if any(vertex_normal != vec3(0.0)) { + return normalize( + mat2x4_f32_to_mat3x3_unpack( + (*instance_uniform).local_from_world_transpose_a, + (*instance_uniform).local_from_world_transpose_b, + ) * vertex_normal + ); + } else { + return vertex_normal; + } } -fn atmospheric_fog( - fog_params: Fog, - input_color: vec4, - distance: f32, - scattering: vec3, -) -> vec4 { - var fog_color = scattering_adjusted_fog_color(fog_params, scattering); - let extinction_factor = 1.0 - 1.0 / exp(distance * fog_params.be); - let inscattering_factor = 1.0 - 1.0 / exp(distance * fog_params.bi); - return vec4( - input_color.rgb * (1.0 - extinction_factor * fog_color.a) - + fog_color.rgb * inscattering_factor * fog_color.a, - input_color.a - ); +fn tangent_local_to_world(vertex_tangent: vec4, world_from_local: mat4x4, mesh_flags: u32) -> vec4 { + if any(vertex_tangent != vec4(0.0)) { + return vec4( + normalize( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz, + ) * vertex_tangent.xyz + ), + vertex_tangent.w * sign_determinant_model_3x3m(mesh_flags) + ); + } else { + return vertex_tangent; + } } +#endif ``` -### crates/bevy_pbr/src/render/pbr_bindings +### bevy_shaders/mesh2d_vertex_output ```rust -#define_import_path bevy_pbr::pbr_bindings - -#import bevy_pbr::pbr_types::StandardMaterial +#define_import_path bevy_sprite::mesh2d_vertex_output -@group(2) @binding(0) var material: StandardMaterial; -@group(2) @binding(1) var base_color_texture: texture_2d; -@group(2) @binding(2) var base_color_sampler: sampler; -@group(2) @binding(3) var emissive_texture: texture_2d; -@group(2) @binding(4) var emissive_sampler: sampler; -@group(2) @binding(5) var metallic_roughness_texture: texture_2d; -@group(2) @binding(6) var metallic_roughness_sampler: sampler; -@group(2) @binding(7) var occlusion_texture: texture_2d; -@group(2) @binding(8) var occlusion_sampler: sampler; -@group(2) @binding(9) var normal_map_texture: texture_2d; -@group(2) @binding(10) var normal_map_sampler: sampler; -@group(2) @binding(11) var depth_map_texture: texture_2d; -@group(2) @binding(12) var depth_map_sampler: sampler; -#ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED -@group(2) @binding(13) var specular_transmission_texture: texture_2d; -@group(2) @binding(14) var specular_transmission_sampler: sampler; -@group(2) @binding(15) var thickness_texture: texture_2d; -@group(2) @binding(16) var thickness_sampler: sampler; -@group(2) @binding(17) var diffuse_transmission_texture: texture_2d; -@group(2) @binding(18) var diffuse_transmission_sampler: sampler; -#endif +struct VertexOutput { + // this is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, + @location(0) world_position: vec4, + @location(1) world_normal: vec3, + @location(2) uv: vec2, + #ifdef VERTEX_TANGENTS + @location(3) world_tangent: vec4, + #endif + #ifdef VERTEX_COLORS + @location(4) color: vec4, + #endif +} ``` -### crates/bevy_pbr/src/render/pbr_types +### bevy_shaders/forward_io ```rust -#define_import_path bevy_pbr::pbr_types +#define_import_path bevy_pbr::forward_io -// 
Since this is a hot path, try to keep the alignment and size of the struct members in mind. -// You can find the alignment and sizes at . -struct StandardMaterial { - base_color: vec4, - emissive: vec4, - attenuation_color: vec4, - uv_transform: mat3x3, - perceptual_roughness: f32, - metallic: f32, - reflectance: f32, - diffuse_transmission: f32, - specular_transmission: f32, - thickness: f32, - ior: f32, - attenuation_distance: f32, - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. - flags: u32, - alpha_cutoff: f32, - parallax_depth_scale: f32, - max_parallax_layer_count: f32, - lightmap_exposure: f32, - max_relief_mapping_search_steps: u32, - /// ID for specifying which deferred lighting pass should be used for rendering this material, if any. - deferred_lighting_pass_id: u32, +struct Vertex { + @builtin(instance_index) instance_index: u32, +#ifdef VERTEX_POSITIONS + @location(0) position: vec3, +#endif +#ifdef VERTEX_NORMALS + @location(1) normal: vec3, +#endif +#ifdef VERTEX_UVS_A + @location(2) uv: vec2, +#endif +#ifdef VERTEX_UVS_B + @location(3) uv_b: vec2, +#endif +#ifdef VERTEX_TANGENTS + @location(4) tangent: vec4, +#endif +#ifdef VERTEX_COLORS + @location(5) color: vec4, +#endif +#ifdef SKINNED + @location(6) joint_indices: vec4, + @location(7) joint_weights: vec4, +#endif +#ifdef MORPH_TARGETS + @builtin(vertex_index) index: u32, +#endif }; -// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -// NOTE: if these flags are updated or changed. Be sure to also update -// deferred_flags_from_mesh_material_flags and mesh_material_flags_from_deferred_flags -// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -const STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT: u32 = 1u; -const STANDARD_MATERIAL_FLAGS_EMISSIVE_TEXTURE_BIT: u32 = 2u; -const STANDARD_MATERIAL_FLAGS_METALLIC_ROUGHNESS_TEXTURE_BIT: u32 = 4u; -const STANDARD_MATERIAL_FLAGS_OCCLUSION_TEXTURE_BIT: u32 = 8u; -const STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT: u32 = 16u; -const STANDARD_MATERIAL_FLAGS_UNLIT_BIT: u32 = 32u; -const STANDARD_MATERIAL_FLAGS_TWO_COMPONENT_NORMAL_MAP: u32 = 64u; -const STANDARD_MATERIAL_FLAGS_FLIP_NORMAL_MAP_Y: u32 = 128u; -const STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT: u32 = 256u; -const STANDARD_MATERIAL_FLAGS_DEPTH_MAP_BIT: u32 = 512u; -const STANDARD_MATERIAL_FLAGS_SPECULAR_TRANSMISSION_TEXTURE_BIT: u32 = 1024u; -const STANDARD_MATERIAL_FLAGS_THICKNESS_TEXTURE_BIT: u32 = 2048u; -const STANDARD_MATERIAL_FLAGS_DIFFUSE_TRANSMISSION_TEXTURE_BIT: u32 = 4096u; -const STANDARD_MATERIAL_FLAGS_ATTENUATION_ENABLED_BIT: u32 = 8192u; -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS: u32 = 3758096384u; // (0b111u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 0u; // (0u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 536870912u; // (1u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND: u32 = 1073741824u; // (2u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED: u32 = 1610612736u; // (3u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD: u32 = 2147483648u; // (4u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MULTIPLY: u32 = 2684354560u; // (5u32 << 29) -const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE: u32 = 3221225472u; // (6u32 << 29) -// ↑ To calculate/verify the values above, use the following playground: -// 
https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=7792f8dd6fc6a8d4d0b6b1776898a7f4 - - -// Creates a StandardMaterial with default values -fn standard_material_new() -> StandardMaterial { - var material: StandardMaterial; - - // NOTE: Keep in-sync with src/pbr_material.rs! - material.base_color = vec4(1.0, 1.0, 1.0, 1.0); - material.emissive = vec4(0.0, 0.0, 0.0, 1.0); - material.perceptual_roughness = 0.5; - material.metallic = 0.00; - material.reflectance = 0.5; - material.diffuse_transmission = 0.0; - material.specular_transmission = 0.0; - material.thickness = 0.0; - material.ior = 1.5; - material.attenuation_distance = 1.0; - material.attenuation_color = vec4(1.0, 1.0, 1.0, 1.0); - material.flags = STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE; - material.alpha_cutoff = 0.5; - material.parallax_depth_scale = 0.1; - material.max_parallax_layer_count = 16.0; - material.max_relief_mapping_search_steps = 5u; - material.deferred_lighting_pass_id = 1u; - // scale 1, translation 0, rotation 0 - material.uv_transform = mat3x3(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0); - - return material; +struct VertexOutput { + // This is `clip position` when the struct is used as a vertex stage output + // and `frag coord` when used as a fragment stage input + @builtin(position) position: vec4, + @location(0) world_position: vec4, + @location(1) world_normal: vec3, +#ifdef VERTEX_UVS_A + @location(2) uv: vec2, +#endif +#ifdef VERTEX_UVS_B + @location(3) uv_b: vec2, +#endif +#ifdef VERTEX_TANGENTS + @location(4) world_tangent: vec4, +#endif +#ifdef VERTEX_COLORS + @location(5) color: vec4, +#endif +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + @location(6) @interpolate(flat) instance_index: u32, +#endif +#ifdef VISIBILITY_RANGE_DITHER + @location(7) @interpolate(flat) visibility_range_dither: i32, +#endif } -struct PbrInput { - material: StandardMaterial, - // Note: this gets monochromized upon deferred PbrInput reconstruction. - diffuse_occlusion: vec3, - // Note: this is 1.0 (entirely unoccluded) when SSAO is off. - specular_occlusion: f32, - frag_coord: vec4, - world_position: vec4, - // Normalized world normal used for shadow mapping as normal-mapping is not used for shadow - // mapping - world_normal: vec3, - // Normalized normal-mapped world normal used for lighting - N: vec3, - // Normalized view vector in world space, pointing from the fragment world position toward the - // view world position - V: vec3, - lightmap_light: vec3, - is_orthographic: bool, - flags: u32, -}; +struct FragmentOutput { + @location(0) color: vec4, +} -// Creates a PbrInput with default values -fn pbr_input_new() -> PbrInput { - var pbr_input: PbrInput; +``` - pbr_input.material = standard_material_new(); - pbr_input.diffuse_occlusion = vec3(1.0); - // If SSAO is enabled, then this gets overwritten with proper specular occlusion. If its not, then we get specular environment map unoccluded (we have no data with which to occlude it with). 
- pbr_input.specular_occlusion = 1.0; +### bevy_shaders/meshlet_bindings - pbr_input.frag_coord = vec4(0.0, 0.0, 0.0, 1.0); - pbr_input.world_position = vec4(0.0, 0.0, 0.0, 1.0); - pbr_input.world_normal = vec3(0.0, 0.0, 1.0); +```rust +#define_import_path bevy_pbr::meshlet_bindings - pbr_input.is_orthographic = false; +#import bevy_pbr::mesh_types::Mesh +#import bevy_render::view::View +#import bevy_pbr::prepass_bindings::PreviousViewUniforms - pbr_input.N = vec3(0.0, 0.0, 1.0); - pbr_input.V = vec3(1.0, 0.0, 0.0); +struct PackedMeshletVertex { + a: vec4, + b: vec4, + tangent: vec4, +} - pbr_input.lightmap_light = vec3(0.0); +// TODO: Octahedral encode normal, remove tangent and derive from UV derivatives +struct MeshletVertex { + position: vec3, + normal: vec3, + uv: vec2, + tangent: vec4, +} - pbr_input.flags = 0u; +fn unpack_meshlet_vertex(packed: PackedMeshletVertex) -> MeshletVertex { + var vertex: MeshletVertex; + vertex.position = packed.a.xyz; + vertex.normal = vec3(packed.a.w, packed.b.xy); + vertex.uv = packed.b.zw; + vertex.tangent = packed.tangent; + return vertex; +} - return pbr_input; +struct Meshlet { + start_vertex_id: u32, + start_index_id: u32, + triangle_count: u32, } -``` +struct MeshletBoundingSpheres { + self_culling: MeshletBoundingSphere, + self_lod: MeshletBoundingSphere, + parent_lod: MeshletBoundingSphere, +} -### crates/bevy_pbr/src/render/mesh +struct MeshletBoundingSphere { + center: vec3, + radius: f32, +} -```rust -#import bevy_pbr::{ - mesh_functions, - skinning, - morph::morph, - forward_io::{Vertex, VertexOutput}, - view_transformations::position_world_to_clip, +struct DrawIndirectArgs { + vertex_count: atomic, + instance_count: u32, + first_vertex: u32, + first_instance: u32, } -#ifdef MORPH_TARGETS -fn morph_vertex(vertex_in: Vertex) -> Vertex { - var vertex = vertex_in; - let weight_count = bevy_pbr::morph::layer_count(); - for (var i: u32 = 0u; i < weight_count; i ++) { - let weight = bevy_pbr::morph::weight_at(i); - if weight == 0.0 { - continue; - } - vertex.position += weight * morph(vertex.index, bevy_pbr::morph::position_offset, i); -#ifdef VERTEX_NORMALS - vertex.normal += weight * morph(vertex.index, bevy_pbr::morph::normal_offset, i); -#endif -#ifdef VERTEX_TANGENTS - vertex.tangent += vec4(weight * morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); +#ifdef MESHLET_FILL_CLUSTER_BUFFERS_PASS +var cluster_count: u32; +@group(0) @binding(0) var meshlet_instance_meshlet_counts_prefix_sum: array; // Per entity instance +@group(0) @binding(1) var meshlet_instance_meshlet_slice_starts: array; // Per entity instance +@group(0) @binding(2) var meshlet_cluster_instance_ids: array; // Per cluster +@group(0) @binding(3) var meshlet_cluster_meshlet_ids: array; // Per cluster #endif - } - return vertex; + +#ifdef MESHLET_CULLING_PASS +@group(0) @binding(0) var meshlet_cluster_meshlet_ids: array; // Per cluster +@group(0) @binding(1) var meshlet_bounding_spheres: array; // Per meshlet +@group(0) @binding(2) var meshlet_cluster_instance_ids: array; // Per cluster +@group(0) @binding(3) var meshlet_instance_uniforms: array; // Per entity instance +@group(0) @binding(4) var meshlet_view_instance_visibility: array; // 1 bit per entity instance, packed as a bitmask +@group(0) @binding(5) var meshlet_second_pass_candidates: array>; // 1 bit per cluster , packed as a bitmask +@group(0) @binding(6) var meshlets: array; // Per meshlet +@group(0) @binding(7) var draw_indirect_args: DrawIndirectArgs; // Single object shared between all 
workgroups/meshlets/triangles +@group(0) @binding(8) var draw_triangle_buffer: array; // Single object shared between all workgroups/meshlets/triangles +@group(0) @binding(9) var depth_pyramid: texture_2d; // From the end of the last frame for the first culling pass, and from the first raster pass for the second culling pass +@group(0) @binding(10) var view: View; +@group(0) @binding(11) var previous_view: PreviousViewUniforms; + +fn should_cull_instance(instance_id: u32) -> bool { + let bit_offset = instance_id % 32u; + let packed_visibility = meshlet_view_instance_visibility[instance_id / 32u]; + return bool(extractBits(packed_visibility, bit_offset, 1u)); +} + +fn cluster_is_second_pass_candidate(cluster_id: u32) -> bool { + let packed_candidates = meshlet_second_pass_candidates[cluster_id / 32u]; + let bit_offset = cluster_id % 32u; + return bool(extractBits(packed_candidates, bit_offset, 1u)); } #endif -@vertex -fn vertex(vertex_no_morph: Vertex) -> VertexOutput { - var out: VertexOutput; +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS +@group(0) @binding(0) var meshlet_cluster_meshlet_ids: array; // Per cluster +@group(0) @binding(1) var meshlets: array; // Per meshlet +@group(0) @binding(2) var meshlet_indices: array; // Many per meshlet +@group(0) @binding(3) var meshlet_vertex_ids: array; // Many per meshlet +@group(0) @binding(4) var meshlet_vertex_data: array; // Many per meshlet +@group(0) @binding(5) var meshlet_cluster_instance_ids: array; // Per cluster +@group(0) @binding(6) var meshlet_instance_uniforms: array; // Per entity instance +@group(0) @binding(7) var meshlet_instance_material_ids: array; // Per entity instance +@group(0) @binding(8) var draw_triangle_buffer: array; // Single object shared between all workgroups/meshlets/triangles +@group(0) @binding(9) var view: View; -#ifdef MORPH_TARGETS - var vertex = morph_vertex(vertex_no_morph); -#else - var vertex = vertex_no_morph; +fn get_meshlet_index(index_id: u32) -> u32 { + let packed_index = meshlet_indices[index_id / 4u]; + let bit_offset = (index_id % 4u) * 8u; + return extractBits(packed_index, bit_offset, 8u); +} #endif -#ifdef SKINNED - var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); -#else - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 . - var model = mesh_functions::get_model_matrix(vertex_no_morph.instance_index); -#endif +#ifdef MESHLET_MESH_MATERIAL_PASS +@group(1) @binding(0) var meshlet_visibility_buffer: texture_2d; // Generated from the meshlet raster passes +@group(1) @binding(1) var meshlet_cluster_meshlet_ids: array; // Per cluster +@group(1) @binding(2) var meshlets: array; // Per meshlet +@group(1) @binding(3) var meshlet_indices: array; // Many per meshlet +@group(1) @binding(4) var meshlet_vertex_ids: array; // Many per meshlet +@group(1) @binding(5) var meshlet_vertex_data: array; // Many per meshlet +@group(1) @binding(6) var meshlet_cluster_instance_ids: array; // Per cluster +@group(1) @binding(7) var meshlet_instance_uniforms: array; // Per entity instance -#ifdef VERTEX_NORMALS -#ifdef SKINNED - out.world_normal = skinning::skin_normals(model, vertex.normal); -#else - out.world_normal = mesh_functions::mesh_normal_local_to_world( - vertex.normal, - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
- // See https://github.com/gfx-rs/naga/issues/2416 - vertex_no_morph.instance_index - ); -#endif +fn get_meshlet_index(index_id: u32) -> u32 { + let packed_index = meshlet_indices[index_id / 4u]; + let bit_offset = (index_id % 4u) * 8u; + return extractBits(packed_index, bit_offset, 8u); +} #endif -#ifdef VERTEX_POSITIONS - out.world_position = mesh_functions::mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); - out.position = position_world_to_clip(out.world_position.xyz); -#endif +``` -#ifdef VERTEX_UVS - out.uv = vertex.uv; -#endif +### bevy_shaders/mesh_view_types -#ifdef VERTEX_UVS_B - out.uv_b = vertex.uv_b; -#endif +```rust +#define_import_path bevy_pbr::mesh_view_types -#ifdef VERTEX_TANGENTS - out.world_tangent = mesh_functions::mesh_tangent_local_to_world( - model, - vertex.tangent, - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 - vertex_no_morph.instance_index - ); -#endif +struct ClusterableObject { + // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3] + // For spot lights: the direction (x,z), spot_scale and spot_offset + light_custom_data: vec4, + color_inverse_square_range: vec4, + position_radius: vec4, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. + flags: u32, + shadow_depth_bias: f32, + shadow_normal_bias: f32, + spot_light_tan_angle: f32, +}; -#ifdef VERTEX_COLORS - out.color = vertex.color; -#endif +const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; +const POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u; -#ifdef VERTEX_OUTPUT_INSTANCE_INDEX - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 - out.instance_index = vertex_no_morph.instance_index; +struct DirectionalCascade { + clip_from_world: mat4x4, + texel_size: f32, + far_bound: f32, +} + +struct DirectionalLight { + cascades: array, + color: vec4, + direction_to_light: vec3, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
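    // Illustrative usage (not part of the struct): shader code tests an option by masking,
    // e.g. `(light.flags & POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT) != 0u`, using the flag
    // constants defined just below this struct.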
+ flags: u32, + shadow_depth_bias: f32, + shadow_normal_bias: f32, + num_cascades: u32, + cascades_overlap_proportion: f32, + depth_texture_base_index: u32, + skip: u32, +}; + +const DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; +const DIRECTIONAL_LIGHT_FLAGS_VOLUMETRIC_BIT: u32 = 2u; + +struct Lights { + // NOTE: this array size must be kept in sync with the constants defined in bevy_pbr/src/render/light.rs + directional_lights: array, + ambient_color: vec4, + // x/y/z dimensions and n_clusters in w + cluster_dimensions: vec4, + // xy are vec2(cluster_dimensions.xy) / vec2(view.width, view.height) + // + // For perspective projections: + // z is cluster_dimensions.z / log(far / near) + // w is cluster_dimensions.z * log(near) / log(far / near) + // + // For orthographic projections: + // NOTE: near and far are +ve but -z is infront of the camera + // z is -near + // w is cluster_dimensions.z / (-far - -near) + cluster_factors: vec4, + n_directional_lights: u32, + spot_light_shadowmap_offset: i32, + environment_map_smallest_specular_mip_level: u32, + environment_map_intensity: f32, +}; + +struct Fog { + base_color: vec4, + directional_light_color: vec4, + // `be` and `bi` are allocated differently depending on the fog mode + // + // For Linear Fog: + // be.x = start, be.y = end + // For Exponential and ExponentialSquared Fog: + // be.x = density + // For Atmospheric Fog: + // be = per-channel extinction density + // bi = per-channel inscattering density + be: vec3, + directional_light_exponent: f32, + bi: vec3, + mode: u32, +} + +// Important: These must be kept in sync with `fog.rs` +const FOG_MODE_OFF: u32 = 0u; +const FOG_MODE_LINEAR: u32 = 1u; +const FOG_MODE_EXPONENTIAL: u32 = 2u; +const FOG_MODE_EXPONENTIAL_SQUARED: u32 = 3u; +const FOG_MODE_ATMOSPHERIC: u32 = 4u; + +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 +struct ClusterableObjects { + data: array, +}; +struct ClusterLightIndexLists { + data: array, +}; +struct ClusterOffsetsAndCounts { + data: array>, +}; +#else +struct ClusterableObjects { + data: array, +}; +struct ClusterLightIndexLists { + // each u32 contains 4 u8 indices into the ClusterableObjects array + data: array, 1024u>, +}; +struct ClusterOffsetsAndCounts { + // each u32 contains a 24-bit index into ClusterLightIndexLists in the high 24 bits + // and an 8-bit count of the number of lights in the low 8 bits + data: array, 1024u>, +}; #endif - return out; -} +struct LightProbe { + // This is stored as the transpose in order to save space in this structure. + // It'll be transposed in the `environment_map_light` function. + light_from_world_transposed: mat3x4, + cubemap_index: i32, + intensity: f32, +}; -@fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { -#ifdef VERTEX_COLORS - return mesh.color; -#else - return vec4(1.0, 0.0, 1.0, 1.0); -#endif -} +struct LightProbes { + // This must match `MAX_VIEW_REFLECTION_PROBES` on the Rust side. + reflection_probes: array, + irradiance_volumes: array, + reflection_probe_count: i32, + irradiance_volume_count: i32, + // The index of the view environment map cubemap binding, or -1 if there's + // no such cubemap. + view_cubemap_index: i32, + // The smallest valid mipmap level for the specular environment cubemap + // associated with the view. + smallest_specular_mip_level_for_view: u32, + // The intensity of the environment map associated with the view. + intensity_for_view: f32, +}; + +// Settings for screen space reflections. 
+// +// For more information on these settings, see the documentation for +// `bevy_pbr::ssr::ScreenSpaceReflectionsSettings`. +struct ScreenSpaceReflectionsSettings { + perceptual_roughness_threshold: f32, + thickness: f32, + linear_steps: u32, + linear_march_exponent: f32, + bisection_steps: u32, + use_secant: u32, +}; ``` -### crates/bevy_pbr/src/render/pbr_ambient +### bevy_shaders/custom_material_import ```rust -#define_import_path bevy_pbr::ambient +// this is made available to the importing module +const COLOR_MULTIPLIER: vec4 = vec4(1.0, 1.0, 1.0, 0.5); -#import bevy_pbr::{ - lighting::{EnvBRDFApprox, F_AB}, - mesh_view_bindings::lights, -} +``` -// A precomputed `NdotV` is provided because it is computed regardless, -// but `world_normal` and the view vector `V` are provided separately for more advanced uses. -fn ambient_light( - world_position: vec4, - world_normal: vec3, - V: vec3, - NdotV: f32, - diffuse_color: vec3, - specular_color: vec3, - perceptual_roughness: f32, - occlusion: vec3, -) -> vec3 { - let diffuse_ambient = EnvBRDFApprox(diffuse_color, F_AB(1.0, NdotV)); - let specular_ambient = EnvBRDFApprox(specular_color, F_AB(perceptual_roughness, NdotV)); +### bevy_shaders/copy_material_depth - // No real world material has specular values under 0.02, so we use this range as a - // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. - // See: https://google.github.io/filament/Filament.html#specularocclusion - let specular_occlusion = saturate(dot(specular_color, vec3(50.0 * 0.33))); +```rust +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput - return (diffuse_ambient + specular_ambient * specular_occlusion) * lights.ambient_color.rgb * occlusion; +@group(0) @binding(0) var material_depth: texture_2d; + +/// This pass copies the R16Uint material depth texture to an actual Depth16Unorm depth texture. + +@fragment +fn copy_material_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { + return f32(textureLoad(material_depth, vec2(in.position.xy), 0).r) / 65535.0; } ``` -### crates/bevy_pbr/src/render/mesh_bindings +### bevy_shaders/lut_bindings ```rust -#define_import_path bevy_pbr::mesh_bindings +#define_import_path bevy_core_pipeline::tonemapping_lut_bindings -#import bevy_pbr::mesh_types::Mesh +@group(0) @binding(#TONEMAPPING_LUT_TEXTURE_BINDING_INDEX) var dt_lut_texture: texture_3d; +@group(0) @binding(#TONEMAPPING_LUT_SAMPLER_BINDING_INDEX) var dt_lut_sampler: sampler; -#ifdef PER_OBJECT_BUFFER_BATCH_SIZE -@group(1) @binding(0) var mesh: array; -#else -@group(1) @binding(0) var mesh: array; -#endif // PER_OBJECT_BUFFER_BATCH_SIZE ``` -### crates/bevy_pbr/src/render/pbr_prepass_functions +### bevy_shaders/game_of_life ```rust -#define_import_path bevy_pbr::pbr_prepass_functions +// The shader reads the previous frame's state from the `input` texture, and writes the new state of +// each pixel to the `output` texture. The textures are flipped each step to progress the +// simulation. +// Two textures are needed for the game of life as each pixel of step N depends on the state of its +// neighbors at step N-1. -#import bevy_pbr::{ - prepass_io::VertexOutput, - prepass_bindings::previous_view_uniforms, - mesh_view_bindings::view, - pbr_bindings, - pbr_types, -} +@group(0) @binding(0) var input: texture_storage_2d; -// Cutoff used for the premultiplied alpha modes BLEND, ADD, and ALPHA_TO_COVERAGE. 
-const PREMULTIPLIED_ALPHA_CUTOFF = 0.05; +@group(0) @binding(1) var output: texture_storage_2d; -// We can use a simplified version of alpha_discard() here since we only need to handle the alpha_cutoff -fn prepass_alpha_discard(in: VertexOutput) { +fn hash(value: u32) -> u32 { + var state = value; + state = state ^ 2747636419u; + state = state * 2654435769u; + state = state ^ state >> 16u; + state = state * 2654435769u; + state = state ^ state >> 16u; + state = state * 2654435769u; + return state; +} -#ifdef MAY_DISCARD - var output_color: vec4 = pbr_bindings::material.base_color; +fn randomFloat(value: u32) -> f32 { + return f32(hash(value)) / 4294967295.0; +} -#ifdef VERTEX_UVS - let uv_transform = pbr_bindings::material.uv_transform; - let uv = (uv_transform * vec3(in.uv, 1.0)).xy; - if (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u { - output_color = output_color * textureSampleBias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); - } -#endif // VERTEX_UVS +@compute @workgroup_size(8, 8, 1) +fn init(@builtin(global_invocation_id) invocation_id: vec3, @builtin(num_workgroups) num_workgroups: vec3) { + let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); - let alpha_mode = pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; - if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK { - if output_color.a < pbr_bindings::material.alpha_cutoff { - discard; - } - } else if (alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND || - alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD || - alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE) { - if output_color.a < PREMULTIPLIED_ALPHA_CUTOFF { - discard; - } - } else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED { - if all(output_color < vec4(PREMULTIPLIED_ALPHA_CUTOFF)) { - discard; - } - } + let randomNumber = randomFloat(invocation_id.y << 16u | invocation_id.x); + let alive = randomNumber > 0.9; + let color = vec4(f32(alive)); -#endif // MAY_DISCARD + textureStore(output, location, color); } -#ifdef MOTION_VECTOR_PREPASS -fn calculate_motion_vector(world_position: vec4, previous_world_position: vec4) -> vec2 { - let clip_position_t = view.unjittered_view_proj * world_position; - let clip_position = clip_position_t.xy / clip_position_t.w; - let previous_clip_position_t = previous_view_uniforms.view_proj * previous_world_position; - let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; - // These motion vectors are used as offsets to UV positions and are stored - // in the range -1,1 to allow offsetting from the one corner to the - // diagonally-opposite corner in UV coordinates, in either direction. - // A difference between diagonally-opposite corners of clip space is in the - // range -2,2, so this needs to be scaled by 0.5. And the V direction goes - // down where clip space y goes up, so y needs to be flipped. 
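// Worked example: a fragment that moves from the bottom-left clip-space corner (-1, -1)
// to the top-right corner (+1, +1) has a clip-space delta of (+2, +2); multiplying by
// (0.5, -0.5) gives a UV-space motion vector of (+1, -1), i.e. one full texture width to
// the right and one full height toward the top of the screen (smaller V).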
- return (clip_position - previous_clip_position) * vec2(0.5, -0.5); +fn is_alive(location: vec2, offset_x: i32, offset_y: i32) -> i32 { + let value: vec4 = textureLoad(input, location + vec2(offset_x, offset_y)); + return i32(value.x); } -#endif // MOTION_VECTOR_PREPASS -``` +fn count_alive(location: vec2) -> i32 { + return is_alive(location, -1, -1) + + is_alive(location, -1, 0) + + is_alive(location, -1, 1) + + is_alive(location, 0, -1) + + is_alive(location, 0, 1) + + is_alive(location, 1, -1) + + is_alive(location, 1, 0) + + is_alive(location, 1, 1); +} -### crates/bevy_pbr/src/render/shadow_sampling +@compute @workgroup_size(8, 8, 1) +fn update(@builtin(global_invocation_id) invocation_id: vec3) { + let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); -```rust -#define_import_path bevy_pbr::shadow_sampling + let n_alive = count_alive(location); -#import bevy_pbr::{ - mesh_view_bindings as view_bindings, - utils::{PI, interleaved_gradient_noise}, - utils, -} -#import bevy_render::maths::orthonormalize + var alive: bool; + if (n_alive == 3) { + alive = true; + } else if (n_alive == 2) { + let currently_alive = is_alive(location, 0, 0); + alive = bool(currently_alive); + } else { + alive = false; + } + let color = vec4(f32(alive)); -// Do the lookup, using HW 2x2 PCF and comparison -fn sample_shadow_map_hardware(light_local: vec2, depth: f32, array_index: i32) -> f32 { -#ifdef NO_ARRAY_TEXTURES_SUPPORT - return textureSampleCompare( - view_bindings::directional_shadow_textures, - view_bindings::directional_shadow_textures_sampler, - light_local, - depth, - ); -#else - return textureSampleCompareLevel( - view_bindings::directional_shadow_textures, - view_bindings::directional_shadow_textures_sampler, - light_local, - array_index, - depth, - ); -#endif + textureStore(output, location, color); } -// Numbers determined by trial and error that gave nice results. -const SPOT_SHADOW_TEXEL_SIZE: f32 = 0.0134277345; -const POINT_SHADOW_SCALE: f32 = 0.003; -const POINT_SHADOW_TEMPORAL_OFFSET_SCALE: f32 = 0.5; - -// These are the standard MSAA sample point positions from D3D. They were chosen -// to get a reasonable distribution that's not too regular. -// -// https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_standard_multisample_quality_levels?redirectedfrom=MSDN -const D3D_SAMPLE_POINT_POSITIONS: array, 8> = array( - vec2( 0.125, -0.375), - vec2(-0.125, 0.375), - vec2( 0.625, 0.125), - vec2(-0.375, -0.625), - vec2(-0.625, 0.625), - vec2(-0.875, -0.125), - vec2( 0.375, 0.875), - vec2( 0.875, -0.875), -); +``` -// And these are the coefficients corresponding to the probability distribution -// function of a 2D Gaussian lobe with zero mean and the identity covariance -// matrix at those points. 
-const D3D_SAMPLE_POINT_COEFFS: array = array( - 0.157112, - 0.157112, - 0.138651, - 0.130251, - 0.114946, - 0.114946, - 0.107982, - 0.079001, -); +### bevy_shaders/fallback_image_test -// https://web.archive.org/web/20230210095515/http://the-witness.net/news/2013/09/shadow-mapping-summary-part-1 -fn sample_shadow_map_castano_thirteen(light_local: vec2, depth: f32, array_index: i32) -> f32 { - let shadow_map_size = vec2(textureDimensions(view_bindings::directional_shadow_textures)); - let inv_shadow_map_size = 1.0 / shadow_map_size; +```rust +#import bevy_pbr::forward_io::VertexOutput - let uv = light_local * shadow_map_size; - var base_uv = floor(uv + 0.5); - let s = (uv.x + 0.5 - base_uv.x); - let t = (uv.y + 0.5 - base_uv.y); - base_uv -= 0.5; - base_uv *= inv_shadow_map_size; +@group(2) @binding(0) var test_texture_1d: texture_1d; +@group(2) @binding(1) var test_texture_1d_sampler: sampler; - let uw0 = (4.0 - 3.0 * s); - let uw1 = 7.0; - let uw2 = (1.0 + 3.0 * s); +@group(2) @binding(2) var test_texture_2d: texture_2d; +@group(2) @binding(3) var test_texture_2d_sampler: sampler; - let u0 = (3.0 - 2.0 * s) / uw0 - 2.0; - let u1 = (3.0 + s) / uw1; - let u2 = s / uw2 + 2.0; +@group(2) @binding(4) var test_texture_2d_array: texture_2d_array; +@group(2) @binding(5) var test_texture_2d_array_sampler: sampler; - let vw0 = (4.0 - 3.0 * t); - let vw1 = 7.0; - let vw2 = (1.0 + 3.0 * t); +@group(2) @binding(6) var test_texture_cube: texture_cube; +@group(2) @binding(7) var test_texture_cube_sampler: sampler; - let v0 = (3.0 - 2.0 * t) / vw0 - 2.0; - let v1 = (3.0 + t) / vw1; - let v2 = t / vw2 + 2.0; +@group(2) @binding(8) var test_texture_cube_array: texture_cube_array; +@group(2) @binding(9) var test_texture_cube_array_sampler: sampler; - var sum = 0.0; +@group(2) @binding(10) var test_texture_3d: texture_3d; +@group(2) @binding(11) var test_texture_3d_sampler: sampler; - sum += uw0 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u0, v0) * inv_shadow_map_size), depth, array_index); - sum += uw1 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u1, v0) * inv_shadow_map_size), depth, array_index); - sum += uw2 * vw0 * sample_shadow_map_hardware(base_uv + (vec2(u2, v0) * inv_shadow_map_size), depth, array_index); +@fragment +fn fragment(in: VertexOutput) {} - sum += uw0 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u0, v1) * inv_shadow_map_size), depth, array_index); - sum += uw1 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u1, v1) * inv_shadow_map_size), depth, array_index); - sum += uw2 * vw1 * sample_shadow_map_hardware(base_uv + (vec2(u2, v1) * inv_shadow_map_size), depth, array_index); +``` - sum += uw0 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u0, v2) * inv_shadow_map_size), depth, array_index); - sum += uw1 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u1, v2) * inv_shadow_map_size), depth, array_index); - sum += uw2 * vw2 * sample_shadow_map_hardware(base_uv + (vec2(u2, v2) * inv_shadow_map_size), depth, array_index); +### bevy_shaders/pbr_ambient - return sum * (1.0 / 144.0); -} +```rust +#define_import_path bevy_pbr::ambient -fn map(min1: f32, max1: f32, min2: f32, max2: f32, value: f32) -> f32 { - return min2 + (value - min1) * (max2 - min2) / (max1 - min1); +#import bevy_pbr::{ + lighting::{EnvBRDFApprox, F_AB}, + mesh_view_bindings::lights, } -// Creates a random rotation matrix using interleaved gradient noise. 
-// -// See: https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare/ -fn random_rotation_matrix(scale: vec2) -> mat2x2 { - let random_angle = 2.0 * PI * interleaved_gradient_noise( - scale, view_bindings::globals.frame_count); - let m = vec2(sin(random_angle), cos(random_angle)); - return mat2x2( - m.y, -m.x, - m.x, m.y - ); -} +// A precomputed `NdotV` is provided because it is computed regardless, +// but `world_normal` and the view vector `V` are provided separately for more advanced uses. +fn ambient_light( + world_position: vec4, + world_normal: vec3, + V: vec3, + NdotV: f32, + diffuse_color: vec3, + specular_color: vec3, + perceptual_roughness: f32, + occlusion: vec3, +) -> vec3 { + let diffuse_ambient = EnvBRDFApprox(diffuse_color, F_AB(1.0, NdotV)); + let specular_ambient = EnvBRDFApprox(specular_color, F_AB(perceptual_roughness, NdotV)); -fn sample_shadow_map_jimenez_fourteen(light_local: vec2, depth: f32, array_index: i32, texel_size: f32) -> f32 { - let shadow_map_size = vec2(textureDimensions(view_bindings::directional_shadow_textures)); - let rotation_matrix = random_rotation_matrix(light_local * shadow_map_size); + // No real world material has specular values under 0.02, so we use this range as a + // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. + // See: https://google.github.io/filament/Filament.html#specularocclusion + let specular_occlusion = saturate(dot(specular_color, vec3(50.0 * 0.33))); - // Empirically chosen fudge factor to make PCF look better across different CSM cascades - let f = map(0.00390625, 0.022949219, 0.015, 0.035, texel_size); - let uv_offset_scale = f / (texel_size * shadow_map_size); + return (diffuse_ambient + specular_ambient * specular_occlusion) * lights.ambient_color.rgb * occlusion; +} - // https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) - let sample_offset0 = (rotation_matrix * utils::SPIRAL_OFFSET_0_) * uv_offset_scale; - let sample_offset1 = (rotation_matrix * utils::SPIRAL_OFFSET_1_) * uv_offset_scale; - let sample_offset2 = (rotation_matrix * utils::SPIRAL_OFFSET_2_) * uv_offset_scale; - let sample_offset3 = (rotation_matrix * utils::SPIRAL_OFFSET_3_) * uv_offset_scale; - let sample_offset4 = (rotation_matrix * utils::SPIRAL_OFFSET_4_) * uv_offset_scale; - let sample_offset5 = (rotation_matrix * utils::SPIRAL_OFFSET_5_) * uv_offset_scale; - let sample_offset6 = (rotation_matrix * utils::SPIRAL_OFFSET_6_) * uv_offset_scale; - let sample_offset7 = (rotation_matrix * utils::SPIRAL_OFFSET_7_) * uv_offset_scale; +``` - var sum = 0.0; - sum += sample_shadow_map_hardware(light_local + sample_offset0, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset1, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset2, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset3, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset4, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset5, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset6, depth, array_index); - sum += sample_shadow_map_hardware(light_local + sample_offset7, depth, array_index); - return sum / 8.0; +### bevy_shaders/mesh + +```rust +#import bevy_pbr::{ + mesh_functions, + skinning, + morph::morph, + forward_io::{Vertex, VertexOutput}, + 
view_transformations::position_world_to_clip, } -fn sample_shadow_map(light_local: vec2, depth: f32, array_index: i32, texel_size: f32) -> f32 { -#ifdef SHADOW_FILTER_METHOD_GAUSSIAN - return sample_shadow_map_castano_thirteen(light_local, depth, array_index); -#else ifdef SHADOW_FILTER_METHOD_TEMPORAL - return sample_shadow_map_jimenez_fourteen(light_local, depth, array_index, texel_size); -#else ifdef SHADOW_FILTER_METHOD_HARDWARE_2X2 - return sample_shadow_map_hardware(light_local, depth, array_index); -#else - // This needs a default return value to avoid shader compilation errors if it's compiled with no SHADOW_FILTER_METHOD_* defined. - // (eg. if the normal prepass is enabled it ends up compiling this due to the normal prepass depending on pbr_functions, which depends on shadows) - // This should never actually get used, as anyone using bevy's lighting/shadows should always have a SHADOW_FILTER_METHOD defined. - // Set to 0 to make it obvious that something is wrong. - return 0.0; +#ifdef MORPH_TARGETS +fn morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = bevy_pbr::morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = bevy_pbr::morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * morph(vertex.index, bevy_pbr::morph::position_offset, i); +#ifdef VERTEX_NORMALS + vertex.normal += weight * morph(vertex.index, bevy_pbr::morph::normal_offset, i); +#endif +#ifdef VERTEX_TANGENTS + vertex.tangent += vec4(weight * morph(vertex.index, bevy_pbr::morph::tangent_offset, i), 0.0); #endif + } + return vertex; } +#endif -// NOTE: Due to the non-uniform control flow in `shadows::fetch_point_shadow`, -// we must use the Level variant of textureSampleCompare to avoid undefined -// behavior due to some of the fragments in a quad (2x2 fragments) being -// processed not being sampled, and this messing with mip-mapping functionality. -// The shadow maps have no mipmaps so Level just samples from LOD 0. -fn sample_shadow_cubemap_hardware(light_local: vec3, depth: f32, light_id: u32) -> f32 { -#ifdef NO_CUBE_ARRAY_TEXTURES_SUPPORT - return textureSampleCompare(view_bindings::point_shadow_textures, view_bindings::point_shadow_textures_sampler, light_local, depth); +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { + var out: VertexOutput; + +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); #else - return textureSampleCompareLevel(view_bindings::point_shadow_textures, view_bindings::point_shadow_textures_sampler, light_local, i32(light_id), depth); + var vertex = vertex_no_morph; #endif -} - -fn sample_shadow_cubemap_at_offset( - position: vec2, - coeff: f32, - x_basis: vec3, - y_basis: vec3, - light_local: vec3, - depth: f32, - light_id: u32, -) -> f32 { - return sample_shadow_cubemap_hardware( - light_local + position.x * x_basis + position.y * y_basis, - depth, - light_id - ) * coeff; -} -// This more or less does what Castano13 does, but in 3D space. Castano13 is -// essentially an optimized 2D Gaussian filter that takes advantage of the -// bilinear filtering hardware to reduce the number of samples needed. This -// trick doesn't apply to cubemaps, so we manually apply a Gaussian filter over -// the standard 8xMSAA pattern instead. -fn sample_shadow_cubemap_gaussian( - light_local: vec3, - depth: f32, - scale: f32, - distance_to_light: f32, - light_id: u32, -) -> f32 { - // Create an orthonormal basis so we can apply a 2D sampling pattern to a - // cubemap. 
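// The `up` fallback below guards against `light_local` pointing almost straight along +Y,
// where the cross products inside `orthonormalize` would produce a degenerate basis;
// scaling the basis by `scale * distance_to_light` keeps the sampling footprint at a
// roughly constant angular size regardless of the fragment's distance from the light.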
- var up = vec3(0.0, 1.0, 0.0); - if (dot(up, normalize(light_local)) > 0.99) { - up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis. - } - let basis = orthonormalize(light_local, up) * scale * distance_to_light; +#ifdef SKINNED + var world_from_local = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); +#else + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 . + var world_from_local = mesh_functions::get_world_from_local(vertex_no_morph.instance_index); +#endif - var sum: f32 = 0.0; - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[0], D3D_SAMPLE_POINT_COEFFS[0], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[1], D3D_SAMPLE_POINT_COEFFS[1], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[2], D3D_SAMPLE_POINT_COEFFS[2], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[3], D3D_SAMPLE_POINT_COEFFS[3], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[4], D3D_SAMPLE_POINT_COEFFS[4], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[5], D3D_SAMPLE_POINT_COEFFS[5], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[6], D3D_SAMPLE_POINT_COEFFS[6], - basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - D3D_SAMPLE_POINT_POSITIONS[7], D3D_SAMPLE_POINT_COEFFS[7], - basis[0], basis[1], light_local, depth, light_id); - return sum; -} +#ifdef VERTEX_NORMALS +#ifdef SKINNED + out.world_normal = skinning::skin_normals(world_from_local, vertex.normal); +#else + out.world_normal = mesh_functions::mesh_normal_local_to_world( + vertex.normal, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif +#endif -// This is a port of the Jimenez14 filter above to the 3D space. It jitters the -// points in the spiral pattern after first creating a 2D orthonormal basis -// along the principal light direction. -fn sample_shadow_cubemap_temporal( - light_local: vec3, - depth: f32, - scale: f32, - distance_to_light: f32, - light_id: u32, -) -> f32 { - // Create an orthonormal basis so we can apply a 2D sampling pattern to a - // cubemap. - var up = vec3(0.0, 1.0, 0.0); - if (dot(up, normalize(light_local)) > 0.99) { - up = vec3(1.0, 0.0, 0.0); // Avoid creating a degenerate basis. 
- } - let basis = orthonormalize(light_local, up) * scale * distance_to_light; +#ifdef VERTEX_POSITIONS + out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + out.position = position_world_to_clip(out.world_position.xyz); +#endif - let rotation_matrix = random_rotation_matrix(vec2(1.0)); +#ifdef VERTEX_UVS_A + out.uv = vertex.uv; +#endif +#ifdef VERTEX_UVS_B + out.uv_b = vertex.uv_b; +#endif - let sample_offset0 = rotation_matrix * utils::SPIRAL_OFFSET_0_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset1 = rotation_matrix * utils::SPIRAL_OFFSET_1_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset2 = rotation_matrix * utils::SPIRAL_OFFSET_2_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset3 = rotation_matrix * utils::SPIRAL_OFFSET_3_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset4 = rotation_matrix * utils::SPIRAL_OFFSET_4_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset5 = rotation_matrix * utils::SPIRAL_OFFSET_5_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset6 = rotation_matrix * utils::SPIRAL_OFFSET_6_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; - let sample_offset7 = rotation_matrix * utils::SPIRAL_OFFSET_7_ * - POINT_SHADOW_TEMPORAL_OFFSET_SCALE; +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_functions::mesh_tangent_local_to_world( + world_from_local, + vertex.tangent, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif - var sum: f32 = 0.0; - sum += sample_shadow_cubemap_at_offset( - sample_offset0, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset1, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset2, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset3, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset4, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset5, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset6, 0.125, basis[0], basis[1], light_local, depth, light_id); - sum += sample_shadow_cubemap_at_offset( - sample_offset7, 0.125, basis[0], basis[1], light_local, depth, light_id); - return sum; +#ifdef VERTEX_COLORS + out.color = vertex.color; +#endif + +#ifdef VERTEX_OUTPUT_INSTANCE_INDEX + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
+ // See https://github.com/gfx-rs/naga/issues/2416 + out.instance_index = vertex_no_morph.instance_index; +#endif + +#ifdef VISIBILITY_RANGE_DITHER + out.visibility_range_dither = mesh_functions::get_visibility_range_dither_level( + vertex_no_morph.instance_index, world_from_local[3]); +#endif + + return out; } -fn sample_shadow_cubemap( - light_local: vec3, - distance_to_light: f32, - depth: f32, - light_id: u32, -) -> f32 { -#ifdef SHADOW_FILTER_METHOD_GAUSSIAN - return sample_shadow_cubemap_gaussian( - light_local, depth, POINT_SHADOW_SCALE, distance_to_light, light_id); -#else ifdef SHADOW_FILTER_METHOD_TEMPORAL - return sample_shadow_cubemap_temporal( - light_local, depth, POINT_SHADOW_SCALE, distance_to_light, light_id); -#else ifdef SHADOW_FILTER_METHOD_HARDWARE_2X2 - return sample_shadow_cubemap_hardware(light_local, depth, light_id); +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { +#ifdef VERTEX_COLORS + return mesh.color; #else - // This needs a default return value to avoid shader compilation errors if it's compiled with no SHADOW_FILTER_METHOD_* defined. - // (eg. if the normal prepass is enabled it ends up compiling this due to the normal prepass depending on pbr_functions, which depends on shadows) - // This should never actually get used, as anyone using bevy's lighting/shadows should always have a SHADOW_FILTER_METHOD defined. - // Set to 0 to make it obvious that something is wrong. - return 0.0; + return vec4(1.0, 0.0, 1.0, 1.0); #endif } ``` -### crates/bevy_pbr/src/render/mesh_view_types +### bevy_shaders/prepass ```rust -#define_import_path bevy_pbr::mesh_view_types - -struct PointLight { - // For point lights: the lower-right 2x2 values of the projection matrix [2][2] [2][3] [3][2] [3][3] - // For spot lights: the direction (x,z), spot_scale and spot_offset - light_custom_data: vec4, - color_inverse_square_range: vec4, - position_radius: vec4, - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. - flags: u32, - shadow_depth_bias: f32, - shadow_normal_bias: f32, - spot_light_tan_angle: f32, -}; - -const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; -const POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE: u32 = 2u; - -struct DirectionalCascade { - view_projection: mat4x4, - texel_size: f32, - far_bound: f32, +#import bevy_pbr::{ + prepass_bindings, + mesh_functions, + prepass_io::{Vertex, VertexOutput, FragmentOutput}, + skinning, + morph, + mesh_view_bindings::view, + view_transformations::position_world_to_clip, } -struct DirectionalLight { - cascades: array, - color: vec4, - direction_to_light: vec3, - // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
- flags: u32, - shadow_depth_bias: f32, - shadow_normal_bias: f32, - num_cascades: u32, - cascades_overlap_proportion: f32, - depth_texture_base_index: u32, - render_layers: u32, -}; - -const DIRECTIONAL_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; +#ifdef DEFERRED_PREPASS +#import bevy_pbr::rgb9e5 +#endif -struct Lights { - // NOTE: this array size must be kept in sync with the constants defined in bevy_pbr/src/render/light.rs - directional_lights: array, - ambient_color: vec4, - // x/y/z dimensions and n_clusters in w - cluster_dimensions: vec4, - // xy are vec2(cluster_dimensions.xy) / vec2(view.width, view.height) - // - // For perspective projections: - // z is cluster_dimensions.z / log(far / near) - // w is cluster_dimensions.z * log(near) / log(far / near) - // - // For orthographic projections: - // NOTE: near and far are +ve but -z is infront of the camera - // z is -near - // w is cluster_dimensions.z / (-far - -near) - cluster_factors: vec4, - n_directional_lights: u32, - spot_light_shadowmap_offset: i32, - environment_map_smallest_specular_mip_level: u32, - environment_map_intensity: f32, -}; +#ifdef MORPH_TARGETS +fn morph_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = morph::weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * morph::morph(vertex.index, morph::position_offset, i); +#ifdef VERTEX_NORMALS + vertex.normal += weight * morph::morph(vertex.index, morph::normal_offset, i); +#endif +#ifdef VERTEX_TANGENTS + vertex.tangent += vec4(weight * morph::morph(vertex.index, morph::tangent_offset, i), 0.0); +#endif + } + return vertex; +} -struct Fog { - base_color: vec4, - directional_light_color: vec4, - // `be` and `bi` are allocated differently depending on the fog mode - // - // For Linear Fog: - // be.x = start, be.y = end - // For Exponential and ExponentialSquared Fog: - // be.x = density - // For Atmospheric Fog: - // be = per-channel extinction density - // bi = per-channel inscattering density - be: vec3, - directional_light_exponent: f32, - bi: vec3, - mode: u32, +// Returns the morphed position of the given vertex from the previous frame. +// +// This function is used for motion vector calculation, and, as such, it doesn't +// bother morphing the normals and tangents. +fn morph_prev_vertex(vertex_in: Vertex) -> Vertex { + var vertex = vertex_in; + let weight_count = morph::layer_count(); + for (var i: u32 = 0u; i < weight_count; i ++) { + let weight = morph::prev_weight_at(i); + if weight == 0.0 { + continue; + } + vertex.position += weight * morph::morph(vertex.index, morph::position_offset, i); + // Don't bother morphing normals and tangents; we don't need them for + // motion vector calculation. 
+ } + return vertex; } +#endif // MORPH_TARGETS -// Important: These must be kept in sync with `fog.rs` -const FOG_MODE_OFF: u32 = 0u; -const FOG_MODE_LINEAR: u32 = 1u; -const FOG_MODE_EXPONENTIAL: u32 = 2u; -const FOG_MODE_EXPONENTIAL_SQUARED: u32 = 3u; -const FOG_MODE_ATMOSPHERIC: u32 = 4u; +@vertex +fn vertex(vertex_no_morph: Vertex) -> VertexOutput { + var out: VertexOutput; -#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 -struct PointLights { - data: array, -}; -struct ClusterLightIndexLists { - data: array, -}; -struct ClusterOffsetsAndCounts { - data: array>, -}; +#ifdef MORPH_TARGETS + var vertex = morph_vertex(vertex_no_morph); #else -struct PointLights { - data: array, -}; -struct ClusterLightIndexLists { - // each u32 contains 4 u8 indices into the PointLights array - data: array, 1024u>, -}; -struct ClusterOffsetsAndCounts { - // each u32 contains a 24-bit index into ClusterLightIndexLists in the high 24 bits - // and an 8-bit count of the number of lights in the low 8 bits - data: array, 1024u>, -}; + var vertex = vertex_no_morph; #endif -struct LightProbe { - // This is stored as the transpose in order to save space in this structure. - // It'll be transposed in the `environment_map_light` function. - inverse_transpose_transform: mat3x4, - cubemap_index: i32, - intensity: f32, -}; +#ifdef SKINNED + var world_from_local = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); +#else // SKINNED + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + var world_from_local = mesh_functions::get_world_from_local(vertex_no_morph.instance_index); +#endif // SKINNED -struct LightProbes { - // This must match `MAX_VIEW_REFLECTION_PROBES` on the Rust side. - reflection_probes: array, - irradiance_volumes: array, - reflection_probe_count: i32, - irradiance_volume_count: i32, - // The index of the view environment map cubemap binding, or -1 if there's - // no such cubemap. - view_cubemap_index: i32, - // The smallest valid mipmap level for the specular environment cubemap - // associated with the view. - smallest_specular_mip_level_for_view: u32, - // The intensity of the environment map associated with the view. - intensity_for_view: f32, -}; + out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + out.position = position_world_to_clip(out.world_position.xyz); +#ifdef DEPTH_CLAMP_ORTHO + out.clip_position_unclamped = out.position; + out.position.z = min(out.position.z, 1.0); +#endif // DEPTH_CLAMP_ORTHO -``` +#ifdef VERTEX_UVS_A + out.uv = vertex.uv; +#endif // VERTEX_UVS_A -### crates/bevy_pbr/src/render/forward_io +#ifdef VERTEX_UVS_B + out.uv_b = vertex.uv_b; +#endif // VERTEX_UVS_B -```rust -#define_import_path bevy_pbr::forward_io +#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS +#ifdef SKINNED + out.world_normal = skinning::skin_normals(world_from_local, vertex.normal); +#else // SKINNED + out.world_normal = mesh_functions::mesh_normal_local_to_world( + vertex.normal, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
+ // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif // SKINNED -struct Vertex { - @builtin(instance_index) instance_index: u32, -#ifdef VERTEX_POSITIONS - @location(0) position: vec3, -#endif -#ifdef VERTEX_NORMALS - @location(1) normal: vec3, -#endif -#ifdef VERTEX_UVS - @location(2) uv: vec2, -#endif -#ifdef VERTEX_UVS_B - @location(3) uv_b: vec2, -#endif #ifdef VERTEX_TANGENTS - @location(4) tangent: vec4, -#endif + out.world_tangent = mesh_functions::mesh_tangent_local_to_world( + world_from_local, + vertex.tangent, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. + // See https://github.com/gfx-rs/naga/issues/2416 + vertex_no_morph.instance_index + ); +#endif // VERTEX_TANGENTS +#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS + #ifdef VERTEX_COLORS - @location(5) color: vec4, -#endif -#ifdef SKINNED - @location(6) joint_indices: vec4, - @location(7) joint_weights: vec4, + out.color = vertex.color; #endif + + // Compute the motion vector for TAA among other purposes. For this we need + // to know where the vertex was last frame. +#ifdef MOTION_VECTOR_PREPASS + + // Take morph targets into account. #ifdef MORPH_TARGETS - @builtin(vertex_index) index: u32, -#endif -}; -struct VertexOutput { - // This is `clip position` when the struct is used as a vertex stage output - // and `frag coord` when used as a fragment stage input - @builtin(position) position: vec4, - @location(0) world_position: vec4, - @location(1) world_normal: vec3, -#ifdef VERTEX_UVS - @location(2) uv: vec2, -#endif -#ifdef VERTEX_UVS_B - @location(3) uv_b: vec2, -#endif -#ifdef VERTEX_TANGENTS - @location(4) world_tangent: vec4, -#endif -#ifdef VERTEX_COLORS - @location(5) color: vec4, -#endif +#ifdef HAS_PREVIOUS_MORPH + let prev_vertex = morph_prev_vertex(vertex_no_morph); +#else // HAS_PREVIOUS_MORPH + let prev_vertex = vertex_no_morph; +#endif // HAS_PREVIOUS_MORPH + +#else // MORPH_TARGETS + let prev_vertex = vertex_no_morph; +#endif // MORPH_TARGETS + + // Take skinning into account. +#ifdef SKINNED + +#ifdef HAS_PREVIOUS_SKIN + let prev_model = skinning::skin_prev_model( + prev_vertex.joint_indices, + prev_vertex.joint_weights, + ); +#else // HAS_PREVIOUS_SKIN + let prev_model = mesh_functions::get_previous_world_from_local(prev_vertex.instance_index); +#endif // HAS_PREVIOUS_SKIN + +#else // SKINNED + let prev_model = mesh_functions::get_previous_world_from_local(prev_vertex.instance_index); +#endif // SKINNED + + out.previous_world_position = mesh_functions::mesh_position_local_to_world( + prev_model, + vec4(prev_vertex.position, 1.0) + ); +#endif // MOTION_VECTOR_PREPASS + #ifdef VERTEX_OUTPUT_INSTANCE_INDEX - @location(6) @interpolate(flat) instance_index: u32, + // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
+ // See https://github.com/gfx-rs/naga/issues/2416 + out.instance_index = vertex_no_morph.instance_index; #endif + + return out; } -struct FragmentOutput { - @location(0) color: vec4, +#ifdef PREPASS_FRAGMENT +@fragment +fn fragment(in: VertexOutput) -> FragmentOutput { + var out: FragmentOutput; + +#ifdef NORMAL_PREPASS + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); +#endif + +#ifdef DEPTH_CLAMP_ORTHO + out.frag_depth = in.clip_position_unclamped.z; +#endif // DEPTH_CLAMP_ORTHO + +#ifdef MOTION_VECTOR_PREPASS + let clip_position_t = view.unjittered_clip_from_world * in.world_position; + let clip_position = clip_position_t.xy / clip_position_t.w; + let previous_clip_position_t = prepass_bindings::previous_view_uniforms.clip_from_world * in.previous_world_position; + let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; + // These motion vectors are used as offsets to UV positions and are stored + // in the range -1,1 to allow offsetting from the one corner to the + // diagonally-opposite corner in UV coordinates, in either direction. + // A difference between diagonally-opposite corners of clip space is in the + // range -2,2, so this needs to be scaled by 0.5. And the V direction goes + // down where clip space y goes up, so y needs to be flipped. + out.motion_vector = (clip_position - previous_clip_position) * vec2(0.5, -0.5); +#endif // MOTION_VECTOR_PREPASS + +#ifdef DEFERRED_PREPASS + // There isn't any material info available for this default prepass shader so we are just writing  + // emissive magenta out to the deferred gbuffer to be rendered by the first deferred lighting pass layer. + // This is here so if the default prepass fragment is used for deferred magenta will be rendered, and also + // as an example to show that a user could write to the deferred gbuffer if they were to start from this shader. 
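    // In the write below, the gbuffer's second channel holds the RGB9E5-packed emissive
    // color (magenta here), and `deferred_lighting_pass_id = 1u` matches the default pass
    // id assigned by `standard_material_new()`, so the default deferred lighting pass
    // picks these fragments up.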
+ out.deferred = vec4(0u, bevy_pbr::rgb9e5::vec3_to_rgb9e5_(vec3(1.0, 0.0, 1.0)), 0u, 0u); + out.deferred_lighting_pass_id = 1u; +#endif + + return out; } +#endif // PREPASS_FRAGMENT ``` -### crates/bevy_pbr/src/render/rgb9e5 +### bevy_shaders/maths ```rust -#define_import_path bevy_pbr::rgb9e5 +#define_import_path bevy_render::maths -const RGB9E5_EXPONENT_BITS = 5u; -const RGB9E5_MANTISSA_BITS = 9; -const RGB9E5_MANTISSA_BITSU = 9u; -const RGB9E5_EXP_BIAS = 15; -const RGB9E5_MAX_VALID_BIASED_EXP = 31u; +const PI: f32 = 3.141592653589793; // π +const PI_2: f32 = 6.283185307179586; // 2π +const HALF_PI: f32 = 1.57079632679; // π/2 +const FRAC_PI_3: f32 = 1.0471975512; // π/3 +const E: f32 = 2.718281828459045; // exp(1) -//#define MAX_RGB9E5_EXP (RGB9E5_MAX_VALID_BIASED_EXP - RGB9E5_EXP_BIAS) -//#define RGB9E5_MANTISSA_VALUES (1<) -> mat3x3 { + return mat3x3( + vec3(affine[0].xy, 0.0), + vec3(affine[1].xy, 0.0), + vec3(affine[2].xy, 1.0), + ); +} -const MAX_RGB9E5_EXP = 16u; -const RGB9E5_MANTISSA_VALUES = 512; -const MAX_RGB9E5_MANTISSA = 511; -const MAX_RGB9E5_MANTISSAU = 511u; -const MAX_RGB9E5_ = 65408.0; -const EPSILON_RGB9E5_ = 0.000000059604645; +fn affine3_to_square(affine: mat3x4) -> mat4x4 { + return transpose(mat4x4( + affine[0], + affine[1], + affine[2], + vec4(0.0, 0.0, 0.0, 1.0), + )); +} -fn floor_log2_(x: f32) -> i32 { - let f = bitcast(x); - let biasedexponent = (f & 0x7F800000u) >> 23u; - return i32(biasedexponent) - 127; +fn mat2x4_f32_to_mat3x3_unpack( + a: mat2x4, + b: f32, +) -> mat3x3 { + return mat3x3( + a[0].xyz, + vec3(a[0].w, a[1].xy), + vec3(a[1].zw, b), + ); } -// https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_shared_exponent.txt -fn vec3_to_rgb9e5_(rgb_in: vec3) -> u32 { - let rgb = clamp(rgb_in, vec3(0.0), vec3(MAX_RGB9E5_)); +// Extracts the square portion of an affine matrix: i.e. discards the +// translation. +fn affine3_to_mat3x3(affine: mat4x3) -> mat3x3 { + return mat3x3(affine[0].xyz, affine[1].xyz, affine[2].xyz); +} - let maxrgb = max(rgb.r, max(rgb.g, rgb.b)); - var exp_shared = max(-RGB9E5_EXP_BIAS - 1, floor_log2_(maxrgb)) + 1 + RGB9E5_EXP_BIAS; - var denom = exp2(f32(exp_shared - RGB9E5_EXP_BIAS - RGB9E5_MANTISSA_BITS)); +// Returns the inverse of a 3x3 matrix. +fn inverse_mat3x3(matrix: mat3x3) -> mat3x3 { + let tmp0 = cross(matrix[1], matrix[2]); + let tmp1 = cross(matrix[2], matrix[0]); + let tmp2 = cross(matrix[0], matrix[1]); + let inv_det = 1.0 / dot(matrix[2], tmp2); + return transpose(mat3x3(tmp0 * inv_det, tmp1 * inv_det, tmp2 * inv_det)); +} - let maxm = i32(floor(maxrgb / denom + 0.5)); - if (maxm == RGB9E5_MANTISSA_VALUES) { - denom *= 2.0; - exp_shared += 1; - } +// Returns the inverse of an affine matrix. +// +// https://en.wikipedia.org/wiki/Affine_transformation#Groups +fn inverse_affine3(affine: mat4x3) -> mat4x3 { + let matrix3 = affine3_to_mat3x3(affine); + let inv_matrix3 = inverse_mat3x3(matrix3); + return mat4x3(inv_matrix3[0], inv_matrix3[1], inv_matrix3[2], -(inv_matrix3 * affine[3])); +} - let n = vec3(floor(rgb / denom + 0.5)); - - return (u32(exp_shared) << 27u) | (n.b << 18u) | (n.g << 9u) | (n.r << 0u); +// Extracts the upper 3x3 portion of a 4x4 matrix. 
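// A 3x3 matrix extracted this way is typically used to transform direction vectors,
// which should not pick up the translation stored in the matrix's fourth column.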
+fn mat4x4_to_mat3x3(m: mat4x4) -> mat3x3 { + return mat3x3(m[0].xyz, m[1].xyz, m[2].xyz); } -// Builtin extractBits() is not working on WEBGL or DX12 -// DX12: HLSL: Unimplemented("write_expr_math ExtractBits") -fn extract_bits(value: u32, offset: u32, bits: u32) -> u32 { - let mask = (1u << bits) - 1u; - return (value >> offset) & mask; +// Creates an orthonormal basis given a Z vector and an up vector (which becomes +// Y after orthonormalization). +// +// The results are equivalent to the Gram-Schmidt process [1]. +// +// [1]: https://math.stackexchange.com/a/1849294 +fn orthonormalize(z_unnormalized: vec3, up: vec3) -> mat3x3 { + let z_basis = normalize(z_unnormalized); + let x_basis = normalize(cross(z_basis, up)); + let y_basis = cross(z_basis, x_basis); + return mat3x3(x_basis, y_basis, z_basis); } -fn rgb9e5_to_vec3_(v: u32) -> vec3 { - let exponent = i32(extract_bits(v, 27u, RGB9E5_EXPONENT_BITS)) - RGB9E5_EXP_BIAS - RGB9E5_MANTISSA_BITS; - let scale = exp2(f32(exponent)); +// Returns true if any part of a sphere is on the positive side of a plane. +// +// `sphere_center.w` should be 1.0. +// +// This is used for frustum culling. +fn sphere_intersects_plane_half_space( + plane: vec4, + sphere_center: vec4, + sphere_radius: f32 +) -> bool { + return dot(plane, sphere_center) + sphere_radius > 0.0; +} - return vec3( - f32(extract_bits(v, 0u, RGB9E5_MANTISSA_BITSU)), - f32(extract_bits(v, 9u, RGB9E5_MANTISSA_BITSU)), - f32(extract_bits(v, 18u, RGB9E5_MANTISSA_BITSU)) - ) * scale; +// pow() but safe for NaNs/negatives +fn powsafe(color: vec3, power: f32) -> vec3 { + return pow(abs(color), vec3(power)) * sign(color); +} + +``` + +### bevy_shaders/wireframe2d + +```rust +#import bevy_sprite::mesh2d_vertex_output::VertexOutput + +struct WireframeMaterial { + color: vec4, +}; + +@group(2) @binding(0) var material: WireframeMaterial; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return material.color; } ``` -### crates/bevy_pbr/src/render/morph +### bevy_shaders/wireframe ```rust -#define_import_path bevy_pbr::morph - -#ifdef MORPH_TARGETS - -#import bevy_pbr::mesh_types::MorphWeights; - -@group(1) @binding(2) var morph_weights: MorphWeights; -@group(1) @binding(3) var morph_targets: texture_3d; +#import bevy_pbr::forward_io::VertexOutput -// NOTE: Those are the "hardcoded" values found in `MorphAttributes` struct -// in crates/bevy_render/src/mesh/morph/visitors.rs -// In an ideal world, the offsets are established dynamically and passed as #defines -// to the shader, but it's out of scope for the initial implementation of morph targets. 
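// Worked example of the layout described above: each vertex owns `total_component_count`
// (9) consecutive texels, so for vertex 10 the normal's x component sits at component
// index 9 * 10 + 3 = 93; `component_texture_coord` maps that to texel
// (93 % width, 93 / width), and the weight index selects the texture layer (z).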
-const position_offset: u32 = 0u; -const normal_offset: u32 = 3u; -const tangent_offset: u32 = 6u; -const total_component_count: u32 = 9u; +struct WireframeMaterial { + color: vec4, +}; -fn layer_count() -> u32 { - let dimensions = textureDimensions(morph_targets); - return u32(dimensions.z); -} -fn component_texture_coord(vertex_index: u32, component_offset: u32) -> vec2 { - let width = u32(textureDimensions(morph_targets).x); - let component_index = total_component_count * vertex_index + component_offset; - return vec2(component_index % width, component_index / width); -} -fn weight_at(weight_index: u32) -> f32 { - let i = weight_index; - return morph_weights.weights[i / 4u][i % 4u]; -} -fn morph_pixel(vertex: u32, component: u32, weight: u32) -> f32 { - let coord = component_texture_coord(vertex, component); - // Due to https://gpuweb.github.io/gpuweb/wgsl/#texel-formats - // While the texture stores a f32, the textureLoad returns a vec4<>, where - // only the first component is set. - return textureLoad(morph_targets, vec3(coord, weight), 0).r; -} -fn morph(vertex_index: u32, component_offset: u32, weight_index: u32) -> vec3 { - return vec3( - morph_pixel(vertex_index, component_offset, weight_index), - morph_pixel(vertex_index, component_offset + 1u, weight_index), - morph_pixel(vertex_index, component_offset + 2u, weight_index), - ); +@group(2) @binding(0) +var material: WireframeMaterial; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return material.color; } -#endif // MORPH_TARGETS - ``` -### crates/bevy_pbr/src/render/clustered_forward +### bevy_shaders/dof ```rust -#define_import_path bevy_pbr::clustered_forward +// Performs depth of field postprocessing, with both Gaussian and bokeh kernels. +// +// Gaussian blur is performed as a separable convolution: first blurring in the +// X direction, and then in the Y direction. This is asymptotically more +// efficient than performing a 2D convolution. +// +// The Bokeh blur uses a similar, but more complex, separable convolution +// technique. The algorithm is described in Colin Barré-Brisebois, "Hexagonal +// Bokeh Blur Revisited" [1]. It's motivated by the observation that we can use +// separable convolutions not only to produce boxes but to produce +// parallelograms. Thus, by performing three separable convolutions in sequence, +// we can produce a hexagonal shape. The first and second convolutions are done +// simultaneously using multiple render targets to cut the total number of +// passes down to two. +// +// [1]: https://colinbarrebrisebois.com/2017/04/18/hexagonal-bokeh-blur-revisited-part-2-improved-2-pass-version/ -#import bevy_pbr::{ - mesh_view_bindings as bindings, - utils::{hsv2rgb, rand_f}, +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_pbr::mesh_view_bindings::view +#import bevy_pbr::view_transformations::depth_ndc_to_view_z +#import bevy_render::view::View + +// Parameters that control the depth of field effect. See +// `bevy_core_pipeline::dof::DepthOfFieldUniforms` for information on what these +// parameters mean. +struct DepthOfFieldParams { + /// The distance in meters to the location in focus. + focal_distance: f32, + + /// The [focal length]. Physically speaking, this represents "the distance + /// from the center of the lens to the principal foci of the lens". The + /// default value, 50 mm, is considered representative of human eyesight. 
+ /// Real-world lenses range from anywhere from 5 mm for "fisheye" lenses to + /// 2000 mm for "super-telephoto" lenses designed for very distant objects. + /// + /// The higher the value, the more blurry objects not in focus will be. + /// + /// [focal length]: https://en.wikipedia.org/wiki/Focal_length + focal_length: f32, + + /// The premultiplied factor that we scale the circle of confusion by. + /// + /// This is calculated as `focal_length² / (sensor_height * aperture_f_stops)`. + coc_scale_factor: f32, + + /// The maximum diameter, in pixels, that we allow a circle of confusion to be. + /// + /// A circle of confusion essentially describes the size of a blur. + /// + /// This value is nonphysical but is useful for avoiding pathologically-slow + /// behavior. + max_circle_of_confusion_diameter: f32, + + /// The depth value that we clamp distant objects to. See the comment in + /// [`DepthOfFieldSettings`] for more information. + max_depth: f32, + + /// Padding. + pad_a: u32, + /// Padding. + pad_b: u32, + /// Padding. + pad_c: u32, +} + +// The first bokeh pass outputs to two render targets. We declare them here. +struct DualOutput { + // The vertical output. + @location(0) output_0: vec4, + // The diagonal output. + @location(1) output_1: vec4, +} + +// @group(0) @binding(0) is `mesh_view_bindings::view`. + +// The depth texture for the main view. +#ifdef MULTISAMPLED +@group(0) @binding(1) var depth_texture: texture_depth_multisampled_2d; +#else // MULTISAMPLED +@group(0) @binding(1) var depth_texture: texture_depth_2d; +#endif // MULTISAMPLED + +// The main color texture. +@group(0) @binding(2) var color_texture_a: texture_2d; + +// The auxiliary color texture that we're sampling from. This is only used as +// part of the second bokeh pass. +#ifdef DUAL_INPUT +@group(0) @binding(3) var color_texture_b: texture_2d; +#endif // DUAL_INPUT + +// The global uniforms, representing data backed by buffers shared among all +// views in the scene. + +// The parameters that control the depth of field effect. +@group(1) @binding(0) var dof_params: DepthOfFieldParams; + +// The sampler that's used to fetch texels from the source color buffer. +@group(1) @binding(1) var color_texture_sampler: sampler; + +// cos(-30°), used for the bokeh blur. +const COS_NEG_FRAC_PI_6: f32 = 0.8660254037844387; +// sin(-30°), used for the bokeh blur. +const SIN_NEG_FRAC_PI_6: f32 = -0.5; +// cos(-150°), used for the bokeh blur. +const COS_NEG_FRAC_PI_5_6: f32 = -0.8660254037844387; +// sin(-150°), used for the bokeh blur. +const SIN_NEG_FRAC_PI_5_6: f32 = -0.5; + +// Calculates and returns the diameter (not the radius) of the [circle of +// confusion]. +// +// [circle of confusion]: https://en.wikipedia.org/wiki/Circle_of_confusion +fn calculate_circle_of_confusion(in_frag_coord: vec4) -> f32 { + // Unpack the depth of field parameters. + let focus = dof_params.focal_distance; + let f = dof_params.focal_length; + let scale = dof_params.coc_scale_factor; + let max_coc_diameter = dof_params.max_circle_of_confusion_diameter; + + // Sample the depth. + let frag_coord = vec2(floor(in_frag_coord.xy)); + let raw_depth = textureLoad(depth_texture, frag_coord, 0); + let depth = min(-depth_ndc_to_view_z(raw_depth), dof_params.max_depth); + + // Calculate the circle of confusion. + // + // This is just the formula from Wikipedia [1]. 
+ // + // [1]: https://en.wikipedia.org/wiki/Circle_of_confusion#Determining_a_circle_of_confusion_diameter_from_the_object_field + let candidate_coc = scale * abs(depth - focus) / (depth * (focus - f)); + + let framebuffer_size = vec2(textureDimensions(color_texture_a)); + return clamp(candidate_coc * framebuffer_size.y, 0.0, max_coc_diameter); } -// NOTE: Keep in sync with bevy_pbr/src/light.rs -fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { - var z_slice: u32 = 0u; - if is_orthographic { - // NOTE: view_z is correct in the orthographic case - z_slice = u32(floor((view_z - bindings::lights.cluster_factors.z) * bindings::lights.cluster_factors.w)); - } else { - // NOTE: had to use -view_z to make it positive else log(negative) is nan - z_slice = u32(log(-view_z) * bindings::lights.cluster_factors.z - bindings::lights.cluster_factors.w + 1.0); +// Performs a single direction of the separable Gaussian blur kernel. +// +// * `frag_coord` is the screen-space pixel coordinate of the fragment (i.e. the +// `position` input to the fragment). +// +// * `coc` is the diameter (not the radius) of the circle of confusion for this +// fragment. +// +// * `frag_offset` is the vector, in screen-space units, from one sample to the +// next. For a horizontal blur this will be `vec2(1.0, 0.0)`; for a vertical +// blur this will be `vec2(0.0, 1.0)`. +// +// Returns the resulting color of the fragment. +fn gaussian_blur(frag_coord: vec4, coc: f32, frag_offset: vec2) -> vec4 { + // Usually σ (the standard deviation) is half the radius, and the radius is + // half the CoC. So we multiply by 0.25. + let sigma = coc * 0.25; + + // 1.5σ is a good, somewhat aggressive default for support—the number of + // texels on each side of the center that we process. + let support = i32(ceil(sigma * 1.5)); + let uv = frag_coord.xy / vec2(textureDimensions(color_texture_a)); + let offset = frag_offset / vec2(textureDimensions(color_texture_a)); + + // The probability density function of the Gaussian blur is (up to constant factors) `exp(-1 / 2σ² * + // x²). We precalculate the constant factor here to avoid having to + // calculate it in the inner loop. + let exp_factor = -1.0 / (2.0 * sigma * sigma); + + // Accumulate samples on both sides of the current texel. Go two at a time, + // taking advantage of bilinear filtering. + var sum = textureSampleLevel(color_texture_a, color_texture_sampler, uv, 0.0).rgb; + var weight_sum = 1.0; + for (var i = 1; i <= support; i += 2) { + // This is a well-known trick to reduce the number of needed texture + // samples by a factor of two. We seek to accumulate two adjacent + // samples c₀ and c₁ with weights w₀ and w₁ respectively, with a single + // texture sample at a carefully chosen location. Observe that: + // + // k ⋅ lerp(c₀, c₁, t) = w₀⋅c₀ + w₁⋅c₁ + // + // w₁ + // if k = w₀ + w₁ and t = ─────── + // w₀ + w₁ + // + // Therefore, if we sample at a distance of t = w₁ / (w₀ + w₁) texels in + // between the two texel centers and scale by k = w₀ + w₁ afterward, we + // effectively evaluate w₀⋅c₀ + w₁⋅c₁ with a single texture lookup. 
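The derivation above is the standard linear-sampling optimization: two texels with Gaussian weights w₀ and w₁ can be fetched with one bilinear tap at offset t = w₁ / (w₀ + w₁) and rescaled by k = w₀ + w₁. A minimal standalone check of that identity in Rust (σ and the texel values are arbitrary example inputs, not values taken from the shader):

```rust
fn main() {
    // Arbitrary example inputs: Gaussian sigma and two adjacent texel values.
    let sigma: f32 = 4.0;
    let (c0, c1): (f32, f32) = (0.25, 0.75);

    // Gaussian weights for texels i and i + 1, as in the shader loop.
    let exp_factor = -1.0 / (2.0 * sigma * sigma);
    let i = 3.0_f32;
    let w0 = (exp_factor * i * i).exp();
    let w1 = (exp_factor * (i + 1.0) * (i + 1.0)).exp();

    // One bilinear fetch at t = w1 / (w0 + w1), scaled by k = w0 + w1 ...
    let t = w1 / (w0 + w1);
    let k = w0 + w1;
    let single_tap = k * (c0 + (c1 - c0) * t); // k * lerp(c0, c1, t)

    // ... equals the two explicitly weighted samples.
    let two_taps = w0 * c0 + w1 * c1;
    assert!((single_tap - two_taps).abs() < 1e-5);
    println!("single bilinear tap: {single_tap}, two weighted taps: {two_taps}");
}
```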
+ let w0 = exp(exp_factor * f32(i) * f32(i)); + let w1 = exp(exp_factor * f32(i + 1) * f32(i + 1)); + let uv_offset = offset * (f32(i) + w1 / (w0 + w1)); + let weight = w0 + w1; + + sum += ( + textureSampleLevel(color_texture_a, color_texture_sampler, uv + uv_offset, 0.0).rgb + + textureSampleLevel(color_texture_a, color_texture_sampler, uv - uv_offset, 0.0).rgb + ) * weight; + weight_sum += weight * 2.0; } - // NOTE: We use min as we may limit the far z plane used for clustering to be closer than - // the furthest thing being drawn. This means that we need to limit to the maximum cluster. - return min(z_slice, bindings::lights.cluster_dimensions.z - 1u); -} -fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { - let xy = vec2(floor((frag_coord - bindings::view.viewport.xy) * bindings::lights.cluster_factors.xy)); - let z_slice = view_z_to_z_slice(view_z, is_orthographic); - // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer - // arrays based on the cluster index. - return min( - (xy.y * bindings::lights.cluster_dimensions.x + xy.x) * bindings::lights.cluster_dimensions.z + z_slice, - bindings::lights.cluster_dimensions.w - 1u - ); + return vec4(sum / weight_sum, 1.0); } -// this must match CLUSTER_COUNT_SIZE in light.rs -const CLUSTER_COUNT_SIZE = 9u; -fn unpack_offset_and_counts(cluster_index: u32) -> vec3 { -#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 - return bindings::cluster_offsets_and_counts.data[cluster_index].xyz; -#else - let offset_and_counts = bindings::cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; - // [ 31 .. 18 | 17 .. 9 | 8 .. 0 ] - // [ offset | point light count | spot light count ] - return vec3( - (offset_and_counts >> (CLUSTER_COUNT_SIZE * 2u)) & ((1u << (32u - (CLUSTER_COUNT_SIZE * 2u))) - 1u), - (offset_and_counts >> CLUSTER_COUNT_SIZE) & ((1u << CLUSTER_COUNT_SIZE) - 1u), - offset_and_counts & ((1u << CLUSTER_COUNT_SIZE) - 1u), - ); -#endif -} +// Performs a box blur in a single direction, sampling `color_texture_a`. +// +// * `frag_coord` is the screen-space pixel coordinate of the fragment (i.e. the +// `position` input to the fragment). +// +// * `coc` is the diameter (not the radius) of the circle of confusion for this +// fragment. +// +// * `frag_offset` is the vector, in screen-space units, from one sample to the +// next. This need not be horizontal or vertical. +fn box_blur_a(frag_coord: vec4, coc: f32, frag_offset: vec2) -> vec4 { + let support = i32(round(coc * 0.5)); + let uv = frag_coord.xy / vec2(textureDimensions(color_texture_a)); + let offset = frag_offset / vec2(textureDimensions(color_texture_a)); + + // Accumulate samples in a single direction. 
+ var sum = vec3(0.0); + for (var i = 0; i <= support; i += 1) { + sum += textureSampleLevel( + color_texture_a, color_texture_sampler, uv + offset * f32(i), 0.0).rgb; + } -fn get_light_id(index: u32) -> u32 { -#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 - return bindings::cluster_light_index_lists.data[index]; -#else - // The index is correct but in cluster_light_index_lists we pack 4 u8s into a u32 - // This means the index into cluster_light_index_lists is index / 4 - let indices = bindings::cluster_light_index_lists.data[index >> 4u][(index >> 2u) & ((1u << 2u) - 1u)]; - // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index - return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); -#endif + return vec4(sum / vec3(1.0 + f32(support)), 1.0); } -fn cluster_debug_visualization( - input_color: vec4, - view_z: f32, - is_orthographic: bool, - offset_and_counts: vec3, - cluster_index: u32, -) -> vec4 { - var output_color = input_color; - - // Cluster allocation debug (using 'over' alpha blending) -#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES - // NOTE: This debug mode visualises the z-slices - let cluster_overlay_alpha = 0.1; - var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); - // A hack to make the colors alternate a bit more - if (z_slice & 1u) == 1u { - z_slice = z_slice + bindings::lights.cluster_dimensions.z / 2u; +// Performs a box blur in a single direction, sampling `color_texture_b`. +// +// * `frag_coord` is the screen-space pixel coordinate of the fragment (i.e. the +// `position` input to the fragment). +// +// * `coc` is the diameter (not the radius) of the circle of confusion for this +// fragment. +// +// * `frag_offset` is the vector, in screen-space units, from one sample to the +// next. This need not be horizontal or vertical. +#ifdef DUAL_INPUT +fn box_blur_b(frag_coord: vec4, coc: f32, frag_offset: vec2) -> vec4 { + let support = i32(round(coc * 0.5)); + let uv = frag_coord.xy / vec2(textureDimensions(color_texture_b)); + let offset = frag_offset / vec2(textureDimensions(color_texture_b)); + + // Accumulate samples in a single direction. + var sum = vec3(0.0); + for (var i = 0; i <= support; i += 1) { + sum += textureSampleLevel( + color_texture_b, color_texture_sampler, uv + offset * f32(i), 0.0).rgb; } - let slice_color = hsv2rgb(f32(z_slice) / f32(bindings::lights.cluster_dimensions.z + 1u), 1.0, 0.5); - output_color = vec4( - (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, - output_color.a - ); -#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES -#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY - // NOTE: This debug mode visualises the number of lights within the cluster that contains - // the fragment. It shows a sort of lighting complexity measure. 
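The removed `get_light_id` above unpacks 8-bit light indices stored four to a `u32` when storage buffers are unavailable. A plain-Rust sketch of the same byte extraction, flattening the uniform's vec4 packing to an ordinary `&[u32]` for illustration:

```rust
/// Extracts the `index`-th 8-bit light id from a buffer that stores four ids
/// per `u32`, lowest byte first — the same shifts as the WGSL fallback path.
fn get_light_id(packed: &[u32], index: usize) -> u32 {
    let word = packed[index / 4];
    let sub_index = (index % 4) as u32;
    (word >> (8 * sub_index)) & 0xFF
}

fn main() {
    // Example data: ids 0x11, 0x22, 0x33, 0x44 packed into one word,
    // then 0x55 as the first id of the next word.
    let packed = [0x4433_2211u32, 0x0000_0055u32];
    assert_eq!(get_light_id(&packed, 0), 0x11);
    assert_eq!(get_light_id(&packed, 3), 0x44);
    assert_eq!(get_light_id(&packed, 4), 0x55);
    println!("packed light ids decode as expected");
}
```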
- let cluster_overlay_alpha = 0.1; - let max_light_complexity_per_cluster = 64.0; - output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r + cluster_overlay_alpha * smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_counts[1] + offset_and_counts[2])); - output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g + cluster_overlay_alpha * (1.0 - smoothStep(0.0, max_light_complexity_per_cluster, f32(offset_and_counts[1] + offset_and_counts[2]))); -#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_LIGHT_COMPLEXITY -#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY - // NOTE: Visualizes the cluster to which the fragment belongs - let cluster_overlay_alpha = 0.1; - var rng = cluster_index; - let cluster_color = hsv2rgb(rand_f(&rng), 1.0, 0.5); - output_color = vec4( - (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, - output_color.a - ); -#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY - return output_color; + return vec4(sum / vec3(1.0 + f32(support)), 1.0); } +#endif -``` +// Calculates the horizontal component of the separable Gaussian blur. +@fragment +fn gaussian_horizontal(in: FullscreenVertexOutput) -> @location(0) vec4 { + let coc = calculate_circle_of_confusion(in.position); + return gaussian_blur(in.position, coc, vec2(1.0, 0.0)); +} -### crates/bevy_pbr/src/render/mesh_functions +// Calculates the vertical component of the separable Gaussian blur. +@fragment +fn gaussian_vertical(in: FullscreenVertexOutput) -> @location(0) vec4 { + let coc = calculate_circle_of_confusion(in.position); + return gaussian_blur(in.position, coc, vec2(0.0, 1.0)); +} -```rust -#define_import_path bevy_pbr::mesh_functions +// Calculates the vertical and first diagonal components of the separable +// hexagonal bokeh blur. +// +// ╱ +// ╱ +// • +// │ +// │ +@fragment +fn bokeh_pass_0(in: FullscreenVertexOutput) -> DualOutput { + let coc = calculate_circle_of_confusion(in.position); + let vertical = box_blur_a(in.position, coc, vec2(0.0, 1.0)); + let diagonal = box_blur_a(in.position, coc, vec2(COS_NEG_FRAC_PI_6, SIN_NEG_FRAC_PI_6)); + + // Note that the diagonal part is pre-mixed with the vertical component. + var output: DualOutput; + output.output_0 = vertical; + output.output_1 = mix(vertical, diagonal, 0.5); + return output; +} -#import bevy_pbr::{ - mesh_view_bindings::view, - mesh_bindings::mesh, - mesh_types::MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT, - view_transformations::position_world_to_clip, +// Calculates the second diagonal components of the separable hexagonal bokeh +// blur. 
+// +// ╲ ╱ +// ╲ ╱ +// • +#ifdef DUAL_INPUT +@fragment +fn bokeh_pass_1(in: FullscreenVertexOutput) -> @location(0) vec4 { + let coc = calculate_circle_of_confusion(in.position); + let output_0 = box_blur_a(in.position, coc, vec2(COS_NEG_FRAC_PI_6, SIN_NEG_FRAC_PI_6)); + let output_1 = box_blur_b(in.position, coc, vec2(COS_NEG_FRAC_PI_5_6, SIN_NEG_FRAC_PI_5_6)); + return mix(output_0, output_1, 0.5); } -#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} +#endif +``` -fn get_model_matrix(instance_index: u32) -> mat4x4 { - return affine3_to_square(mesh[instance_index].model); -} +### bevy_shaders/ui_vertex_output -fn get_previous_model_matrix(instance_index: u32) -> mat4x4 { - return affine3_to_square(mesh[instance_index].previous_model); -} +```rust +#define_import_path bevy_ui::ui_vertex_output -fn mesh_position_local_to_world(model: mat4x4, vertex_position: vec4) -> vec4 { - return model * vertex_position; -} +// The Vertex output of the default vertex shader for the Ui Material pipeline. +struct UiVertexOutput { + @location(0) uv: vec2, + // The size of the borders in UV space. Order is Left, Right, Top, Bottom. + @location(1) border_widths: vec4, + // The size of the node in pixels. Order is width, height. + @location(2) @interpolate(flat) size: vec2, + @builtin(position) position: vec4, +}; -// NOTE: The intermediate world_position assignment is important -// for precision purposes when using the 'equals' depth comparison -// function. -fn mesh_position_local_to_clip(model: mat4x4, vertex_position: vec4) -> vec4 { - let world_position = mesh_position_local_to_world(model, vertex_position); - return position_world_to_clip(world_position.xyz); -} +``` -fn mesh_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { - // NOTE: The mikktspace method of normal mapping requires that the world normal is - // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents - // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, - // Unreal Engine, Godot, and more all use the mikktspace method. - // We only skip normalization for invalid normals so that they don't become NaN. - // Do not change this code unless you really know what you are doing. - // http://www.mikktspace.com/ - if any(vertex_normal != vec3(0.0)) { - return normalize( - mat2x4_f32_to_mat3x3_unpack( - mesh[instance_index].inverse_transpose_model_a, - mesh[instance_index].inverse_transpose_model_b, - ) * vertex_normal - ); - } else { - return vertex_normal; - } -} +### bevy_shaders/dummy_visibility_buffer_resolve -// Calculates the sign of the determinant of the 3x3 model matrix based on a -// mesh flag -fn sign_determinant_model_3x3m(instance_index: u32) -> f32 { - // bool(u32) is false if 0u else true - // f32(bool) is 1.0 if true else 0.0 - // * 2.0 - 1.0 remaps 0.0 or 1.0 to -1.0 or 1.0 respectively - return f32(bool(mesh[instance_index].flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0; -} +```rust +#define_import_path bevy_pbr::meshlet_visibility_buffer_resolve -fn mesh_tangent_local_to_world(model: mat4x4, vertex_tangent: vec4, instance_index: u32) -> vec4 { - // NOTE: The mikktspace method of normal mapping requires that the world tangent is - // re-normalized in the vertex shader to match the way mikktspace bakes vertex tangents - // and normal maps so that the exact inverse process is applied when shading. Blender, Unity, - // Unreal Engine, Godot, and more all use the mikktspace method. 
- // We only skip normalization for invalid tangents so that they don't become NaN. - // Do not change this code unless you really know what you are doing. - // http://www.mikktspace.com/ - if any(vertex_tangent != vec4(0.0)) { - return vec4( - normalize( - mat3x3( - model[0].xyz, - model[1].xyz, - model[2].xyz - ) * vertex_tangent.xyz - ), - // NOTE: Multiplying by the sign of the determinant of the 3x3 model matrix accounts for - // situations such as negative scaling. - vertex_tangent.w * sign_determinant_model_3x3m(instance_index) - ); - } else { - return vertex_tangent; - } -} +/// Dummy shader to prevent naga_oil from complaining about missing imports when the MeshletPlugin is not loaded, +/// as naga_oil tries to resolve imports even if they're behind an #ifdef. ``` -### crates/bevy_pbr/src/render/pbr_transmission +### bevy_shaders/irradiance_volume_voxel_visualization ```rust -#define_import_path bevy_pbr::transmission - -#import bevy_pbr::{ - lighting, - prepass_utils, - utils::{PI, interleaved_gradient_noise}, - utils, - mesh_view_bindings as view_bindings, -}; +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::irradiance_volume +#import bevy_pbr::mesh_view_bindings -#import bevy_core_pipeline::tonemapping::{ - approximate_inverse_tone_mapping -}; +struct VoxelVisualizationIrradianceVolumeInfo { + world_from_voxel: mat4x4, + voxel_from_world: mat4x4, + resolution: vec3, + // A scale factor that's applied to the diffuse and specular light from the + // light probe. This is in units of cd/m² (candela per square meter). + intensity: f32, +} -fn specular_transmissive_light(world_position: vec4, frag_coord: vec3, view_z: f32, N: vec3, V: vec3, F0: vec3, ior: f32, thickness: f32, perceptual_roughness: f32, specular_transmissive_color: vec3, transmitted_environment_light_specular: vec3) -> vec3 { - // Calculate the ratio between refaction indexes. Assume air/vacuum for the space outside the mesh - let eta = 1.0 / ior; +@group(2) @binding(100) +var irradiance_volume_info: VoxelVisualizationIrradianceVolumeInfo; - // Calculate incidence vector (opposite to view vector) and its dot product with the mesh normal - let I = -V; - let NdotI = dot(N, I); +@fragment +fn fragment(mesh: VertexOutput) -> @location(0) vec4 { + // Snap the world position we provide to `irradiance_volume_light()` to the + // middle of the nearest texel. + var unit_pos = (irradiance_volume_info.voxel_from_world * + vec4(mesh.world_position.xyz, 1.0f)).xyz; + let resolution = vec3(irradiance_volume_info.resolution); + let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f)); + let stp_rounded = round(stp - 0.5f) + 0.5f; + let rounded_world_pos = (irradiance_volume_info.world_from_voxel * vec4(stp_rounded, 1.0f)).xyz; - // Calculate refracted direction using Snell's law - let k = 1.0 - eta * eta * (1.0 - NdotI * NdotI); - let T = eta * I - (eta * NdotI + sqrt(k)) * N; + // `irradiance_volume_light()` multiplies by intensity, so cancel it out. + // If we take intensity into account, the cubes will be way too bright. 
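Just above, the visualization shader snaps the fragment's world position to the centre of the nearest irradiance-volume texel (transform into voxel space, clamp, round to the half-texel grid). The same arithmetic for a single axis in plain Rust, assuming, as the `+ 0.5` offset suggests, that voxel space spans -0.5..0.5 across the volume:

```rust
/// Snaps a coordinate in the volume's unit space (assumed to span -0.5..=0.5)
/// to the centre of the nearest texel, mirroring the clamp/round sequence above.
fn snap_to_texel_center(unit_pos: f32, resolution: f32) -> f32 {
    // Texel-space coordinate, clamped so we never round past the edge texels.
    let stp = ((unit_pos + 0.5) * resolution).clamp(0.5, resolution - 0.5);
    // Texel centres sit at 0.5, 1.5, 2.5, ...
    (stp - 0.5).round() + 0.5
}

fn main() {
    // Example: an 8-texel axis. 0.3 in unit space maps to texel coordinate 6.4,
    // which snaps to the texel centre at 6.5.
    let res = 8.0;
    println!("{}", snap_to_texel_center(0.3, res));
}
```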
+ let rgb = irradiance_volume::irradiance_volume_light( + mesh.world_position.xyz, + mesh.world_normal) / irradiance_volume_info.intensity; - // Calculate the exit position of the refracted ray, by propagating refacted direction through thickness - let exit_position = world_position.xyz + T * thickness; + return vec4(rgb, 1.0f); +} - // Transform exit_position into clip space - let clip_exit_position = view_bindings::view.view_proj * vec4(exit_position, 1.0); +``` - // Scale / offset position so that coordinate is in right space for sampling transmissive background texture - let offset_position = (clip_exit_position.xy / clip_exit_position.w) * vec2(0.5, -0.5) + 0.5; +### bevy_shaders/volumetric_fog - // Fetch background color - var background_color: vec4; - if perceptual_roughness == 0.0 { - // If the material has zero roughness, we can use a faster approach without the blur - background_color = fetch_transmissive_background_non_rough(offset_position, frag_coord); - } else { - background_color = fetch_transmissive_background(offset_position, frag_coord, view_z, perceptual_roughness); - } +```rust +// A postprocessing shader that implements volumetric fog via raymarching and +// sampling directional light shadow maps. +// +// The overall approach is a combination of the volumetric rendering in [1] and +// the shadow map raymarching in [2]. First, we sample the depth buffer to +// determine how long our ray is. Then we do a raymarch, with physically-based +// calculations at each step to determine how much light was absorbed, scattered +// out, and scattered in. To determine in-scattering, we sample the shadow map +// for the light to determine whether the point was in shadow or not. +// +// [1]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/intro-volume-rendering.html +// +// [2]: http://www.alexandre-pestana.com/volumetric-lights/ - // Compensate for exposure, since the background color is coming from an already exposure-adjusted texture - background_color = vec4(background_color.rgb / view_bindings::view.exposure, background_color.a); +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_pbr::mesh_view_bindings::{lights, view} +#import bevy_pbr::mesh_view_types::DIRECTIONAL_LIGHT_FLAGS_VOLUMETRIC_BIT +#import bevy_pbr::shadow_sampling::sample_shadow_map_hardware +#import bevy_pbr::shadows::{get_cascade_index, world_to_directional_light_local} +#import bevy_pbr::view_transformations::{ + frag_coord_to_ndc, + position_ndc_to_view, + position_ndc_to_world +} + +// The GPU version of [`VolumetricFogSettings`]. See the comments in +// `volumetric_fog/mod.rs` for descriptions of the fields here. +struct VolumetricFog { + fog_color: vec3, + light_tint: vec3, + ambient_color: vec3, + ambient_intensity: f32, + step_count: u32, + max_depth: f32, + absorption: f32, + scattering: f32, + density: f32, + scattering_asymmetry: f32, + light_intensity: f32, +} + +@group(1) @binding(0) var volumetric_fog: VolumetricFog; +@group(1) @binding(1) var color_texture: texture_2d; +@group(1) @binding(2) var color_sampler: sampler; - // Dot product of the refracted direction with the exit normal (Note: We assume the exit normal is the entry normal but inverted) - let MinusNdotT = dot(-N, T); +#ifdef MULTISAMPLED +@group(1) @binding(3) var depth_texture: texture_depth_multisampled_2d; +#else +@group(1) @binding(3) var depth_texture: texture_depth_2d; +#endif - // Calculate 1.0 - fresnel factor (how much light is _NOT_ reflected, i.e. 
how much is transmitted) - let F = vec3(1.0) - lighting::fresnel(F0, MinusNdotT); +// 1 / (4π) +const FRAC_4_PI: f32 = 0.07957747154594767; - // Calculate final color by applying fresnel multiplied specular transmissive color to a mix of background color and transmitted specular environment light - return F * specular_transmissive_color * mix(transmitted_environment_light_specular, background_color.rgb, background_color.a); +// The common Henyey-Greenstein asymmetric phase function [1] [2]. +// +// This determines how much light goes toward the viewer as opposed to away from +// the viewer. From a visual point of view, it controls how the light shafts +// appear and disappear as the camera looks at the light source. +// +// [1]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/ray-marching-get-it-right.html +// +// [2]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions#TheHenyeyndashGreensteinPhaseFunction +fn henyey_greenstein(neg_LdotV: f32) -> f32 { + let g = volumetric_fog.scattering_asymmetry; + let denom = 1.0 + g * g - 2.0 * g * neg_LdotV; + return FRAC_4_PI * (1.0 - g * g) / (denom * sqrt(denom)); } -fn fetch_transmissive_background_non_rough(offset_position: vec2, frag_coord: vec3) -> vec4 { - var background_color = textureSampleLevel( - view_bindings::view_transmission_texture, - view_bindings::view_transmission_sampler, - offset_position, - 0.0 +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // Unpack the `volumetric_fog` settings. + let fog_color = volumetric_fog.fog_color; + let ambient_color = volumetric_fog.ambient_color; + let ambient_intensity = volumetric_fog.ambient_intensity; + let step_count = volumetric_fog.step_count; + let max_depth = volumetric_fog.max_depth; + let absorption = volumetric_fog.absorption; + let scattering = volumetric_fog.scattering; + let density = volumetric_fog.density; + let light_tint = volumetric_fog.light_tint; + let light_intensity = volumetric_fog.light_intensity; + + let exposure = view.exposure; + + // Sample the depth. If this is multisample, just use sample 0; this is + // approximate but good enough. + let frag_coord = in.position; + let depth = textureLoad(depth_texture, vec2(frag_coord.xy), 0); + + // Starting at the end depth, which we got above, figure out how long the + // ray we want to trace is and the length of each increment. + let end_depth = min( + max_depth, + -position_ndc_to_view(frag_coord_to_ndc(vec4(in.position.xy, depth, 1.0))).z ); + let step_size = end_depth / f32(step_count); -#ifdef DEPTH_PREPASS -#ifndef WEBGL2 - // Use depth prepass data to reject values that are in front of the current fragment - if prepass_utils::prepass_depth(vec4(offset_position * view_bindings::view.viewport.zw, 0.0, 0.0), 0u) > frag_coord.z { - background_color.a = 0.0; - } -#endif -#endif + let directional_light_count = lights.n_directional_lights; -#ifdef TONEMAP_IN_SHADER - background_color = approximate_inverse_tone_mapping(background_color, view_bindings::view.color_grading); -#endif + // Calculate the ray origin (`Ro`) and the ray direction (`Rd`) in NDC, + // view, and world coordinates. 
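`henyey_greenstein` above evaluates the normalized Henyey-Greenstein phase function p(cosθ) = 1/(4π) · (1 − g²) / (1 + g² − 2g·cosθ)^(3/2), where cosθ comes from −L·V. A direct Rust transcription, handy for checking how the asymmetry g shapes the lobe (the sample values of g are arbitrary):

```rust
/// Henyey-Greenstein phase function, matching the WGSL above:
/// p(cosθ) = 1/(4π) · (1 − g²) / (1 + g² − 2g·cosθ)^(3/2).
fn henyey_greenstein(cos_theta: f32, g: f32) -> f32 {
    const FRAC_4_PI: f32 = 0.07957747154594767; // 1 / (4π)
    let denom = 1.0 + g * g - 2.0 * g * cos_theta;
    FRAC_4_PI * (1.0 - g * g) / (denom * denom.sqrt())
}

fn main() {
    // g = 0 is isotropic; positive g concentrates the lobe toward cosθ = 1.
    for g in [0.0_f32, 0.3, 0.7] {
        println!(
            "g = {g}: p(1) = {:.4}, p(-1) = {:.4}",
            henyey_greenstein(1.0, g),
            henyey_greenstein(-1.0, g)
        );
    }
}
```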
+ let Rd_ndc = vec3(frag_coord_to_ndc(in.position).xy, 1.0); + let Rd_view = normalize(position_ndc_to_view(Rd_ndc)); + let Ro_world = view.world_position; + let Rd_world = normalize(position_ndc_to_world(Rd_ndc) - Ro_world); - return background_color; -} + // Use Beer's law [1] [2] to calculate the maximum amount of light that each + // directional light could contribute, and modulate that value by the light + // tint and fog color. (The actual value will in turn be modulated by the + // phase according to the Henyey-Greenstein formula.) + // + // We use a bit of a hack here. Conceptually, directional lights are + // infinitely far away. But, if we modeled exactly that, then directional + // lights would never contribute any light to the fog, because an + // infinitely-far directional light combined with an infinite amount of fog + // would result in complete absorption of the light. So instead we pretend + // that the directional light is `max_depth` units away and do the + // calculation in those terms. Because the fake distance to the directional + // light is a constant, this lets us perform the calculation once up here + // instead of marching secondary rays toward the light during the + // raymarching step, which improves performance dramatically. + // + // [1]: https://www.scratchapixel.com/lessons/3d-basic-rendering/volume-rendering-for-developers/intro-volume-rendering.html + // + // [2]: https://en.wikipedia.org/wiki/Beer%E2%80%93Lambert_law + let light_attenuation = exp(-density * max_depth * (absorption + scattering)); + let light_factors_per_step = fog_color * light_tint * light_attenuation * scattering * + density * step_size * light_intensity * exposure; + + // Use Beer's law again to accumulate the ambient light all along the path. + var accumulated_color = exp(-end_depth * (absorption + scattering)) * ambient_color * + ambient_intensity; + + // Pre-calculate absorption (amount of light absorbed by the fog) and + // out-scattering (amount of light the fog scattered away). This is the same + // amount for every step. + let sample_attenuation = exp(-step_size * density * (absorption + scattering)); + + // This is the amount of the background that shows through. We're actually + // going to recompute this over and over again for each directional light, + // coming up with the same values each time. + var background_alpha = 1.0; + + for (var light_index = 0u; light_index < directional_light_count; light_index += 1u) { + // Volumetric lights are all sorted first, so the first time we come to + // a non-volumetric light, we know we've seen them all. + let light = &lights.directional_lights[light_index]; + if (((*light).flags & DIRECTIONAL_LIGHT_FLAGS_VOLUMETRIC_BIT) == 0) { + break; + } -fn fetch_transmissive_background(offset_position: vec2, frag_coord: vec3, view_z: f32, perceptual_roughness: f32) -> vec4 { - // Calculate view aspect ratio, used to scale offset so that it's proportionate - let aspect = view_bindings::view.viewport.z / view_bindings::view.viewport.w; + // Offset the depth value by the bias. + let depth_offset = (*light).shadow_depth_bias * (*light).direction_to_light.xyz; - // Calculate how “blurry” the transmission should be. - // Blur is more or less eyeballed to look approximately “right”, since the “correct” - // approach would involve projecting many scattered rays and figuring out their individual - // exit positions. 
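The march above applies Beer's law per step: multiplying `background_alpha` by `exp(-step_size * density * (absorption + scattering))` once per step accumulates to the closed-form transmittance over the whole ray. A small Rust check of that equivalence, with made-up parameter values in the spirit of the uniforms above:

```rust
fn main() {
    // Example parameters (arbitrary values, not taken from the shader).
    let (density, absorption, scattering) = (0.2_f32, 0.3_f32, 0.3_f32);
    let (end_depth, step_count) = (10.0_f32, 64u32);
    let step_size = end_depth / step_count as f32;

    // Per-step transmittance from Beer's law, applied once per raymarch step...
    let sample_attenuation = (-step_size * density * (absorption + scattering)).exp();
    let mut background_alpha = 1.0_f32;
    for _ in 0..step_count {
        background_alpha *= sample_attenuation;
    }

    // ...accumulates to the closed-form transmittance over the whole ray.
    let closed_form = (-end_depth * density * (absorption + scattering)).exp();
    println!("marched: {background_alpha:.6}, closed form: {closed_form:.6}");
}
```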
IRL, light rays can be scattered when entering/exiting a material (due to - // roughness) or inside the material (due to subsurface scattering). Here, we only consider - // the first scenario. - // - // Blur intensity is: - // - proportional to the square of `perceptual_roughness` - // - proportional to the inverse of view z - let blur_intensity = (perceptual_roughness * perceptual_roughness) / view_z; + // Compute phase, which determines the fraction of light that's + // scattered toward the camera instead of away from it. + let neg_LdotV = dot(normalize((*light).direction_to_light.xyz), Rd_world); + let phase = henyey_greenstein(neg_LdotV); -#ifdef SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS - let num_taps = #{SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS}; // Controlled by the `Camera3d::screen_space_specular_transmission_quality` property -#else - let num_taps = 8; // Fallback to 8 taps, if not specified -#endif - let num_spirals = i32(ceil(f32(num_taps) / 8.0)); -#ifdef TEMPORAL_JITTER - let random_angle = interleaved_gradient_noise(frag_coord.xy, view_bindings::globals.frame_count); -#else - let random_angle = interleaved_gradient_noise(frag_coord.xy, 0u); -#endif - // Pixel checkerboard pattern (helps make the interleaved gradient noise pattern less visible) - let pixel_checkboard = ( -#ifdef TEMPORAL_JITTER - // 0 or 1 on even/odd pixels, alternates every frame - (i32(frag_coord.x) + i32(frag_coord.y) + i32(view_bindings::globals.frame_count)) % 2 -#else - // 0 or 1 on even/odd pixels - (i32(frag_coord.x) + i32(frag_coord.y)) % 2 -#endif - ); + // Modulate the factor we calculated above by the phase, fog color, + // light color, light tint. + let light_color_per_step = (*light).color.rgb * phase * light_factors_per_step; - var result = vec4(0.0); - for (var i: i32 = 0; i < num_taps; i = i + 1) { - let current_spiral = (i >> 3u); - let angle = (random_angle + f32(current_spiral) / f32(num_spirals)) * 2.0 * PI; - let m = vec2(sin(angle), cos(angle)); - let rotation_matrix = mat2x2( - m.y, -m.x, - m.x, m.y - ); + // Reset `background_alpha` for a new raymarch. + background_alpha = 1.0; - // Get spiral offset - var spiral_offset: vec2; - switch i & 7 { - // https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) - // TODO: Figure out a more reasonable way of doing this, as WGSL - // seems to only allow constant indexes into constant arrays at the moment. - // The downstream shader compiler should be able to optimize this into a single - // constant when unrolling the for loop, but it's still not ideal. - case 0: { spiral_offset = utils::SPIRAL_OFFSET_0_; } // Note: We go even first and then odd, so that the lowest - case 1: { spiral_offset = utils::SPIRAL_OFFSET_2_; } // quality possible (which does 4 taps) still does a full spiral - case 2: { spiral_offset = utils::SPIRAL_OFFSET_4_; } // instead of just the first half of it - case 3: { spiral_offset = utils::SPIRAL_OFFSET_6_; } - case 4: { spiral_offset = utils::SPIRAL_OFFSET_1_; } - case 5: { spiral_offset = utils::SPIRAL_OFFSET_3_; } - case 6: { spiral_offset = utils::SPIRAL_OFFSET_5_; } - case 7: { spiral_offset = utils::SPIRAL_OFFSET_7_; } - default: {} - } + // Start raymarching. + for (var step = 0u; step < step_count; step += 1u) { + // As an optimization, break if we've gotten too dark. 
+ if (background_alpha < 0.001) { + break; + } - // Make each consecutive spiral slightly smaller than the previous one - spiral_offset *= 1.0 - (0.5 * f32(current_spiral + 1) / f32(num_spirals)); + // Calculate where we are in the ray. + let P_world = Ro_world + Rd_world * f32(step) * step_size; + let P_view = Rd_view * f32(step) * step_size; + + // Process absorption and out-scattering. + background_alpha *= sample_attenuation; + + // Compute in-scattering (amount of light other fog particles + // scattered into this ray). This is where any directional light is + // scattered in. + + // Prepare to sample the shadow map. + let cascade_index = get_cascade_index(light_index, P_view.z); + let light_local = world_to_directional_light_local( + light_index, + cascade_index, + vec4(P_world + depth_offset, 1.0) + ); + + // If we're outside the shadow map entirely, local light attenuation + // is zero. + var local_light_attenuation = f32(light_local.w != 0.0); + + // Otherwise, sample the shadow map to determine whether, and by how + // much, this sample is in the light. + if (local_light_attenuation != 0.0) { + let cascade = &(*light).cascades[cascade_index]; + let array_index = i32((*light).depth_texture_base_index + cascade_index); + local_light_attenuation = + sample_shadow_map_hardware(light_local.xy, light_local.z, array_index); + } - // Rotate and correct for aspect ratio - let rotated_spiral_offset = (rotation_matrix * spiral_offset) * vec2(1.0, aspect); + if (local_light_attenuation != 0.0) { + // Accumulate the light. + accumulated_color += light_color_per_step * local_light_attenuation * + background_alpha; + } + } + } - // Calculate final offset position, with blur and spiral offset - let modified_offset_position = offset_position + rotated_spiral_offset * blur_intensity * (1.0 - f32(pixel_checkboard) * 0.1); + // We're done! Blend between the source color and the lit fog color. + let source = textureSample(color_texture, color_sampler, in.uv); + return vec4(source.rgb * background_alpha + accumulated_color, source.a); +} - // Sample the view transmission texture at the offset position + noise offset, to get the background color - var sample = textureSampleLevel( - view_bindings::view_transmission_texture, - view_bindings::view_transmission_sampler, - modified_offset_position, - 0.0 - ); +``` -#ifdef DEPTH_PREPASS -#ifndef WEBGL2 - // Use depth prepass data to reject values that are in front of the current fragment - if prepass_utils::prepass_depth(vec4(modified_offset_position * view_bindings::view.viewport.zw, 0.0, 0.0), 0u) > frag_coord.z { - sample = vec4(0.0); - } -#endif -#endif +### bevy_shaders/custom_phase_item - // As blur intensity grows higher, gradually limit *very bright* color RGB values towards a - // maximum length of 1.0 to prevent stray “firefly” pixel artifacts. This can potentially make - // very strong emissive meshes appear much dimmer, but the artifacts are noticeable enough to - // warrant this treatment. - let normalized_rgb = normalize(sample.rgb); - result += vec4(min(sample.rgb, normalized_rgb / saturate(blur_intensity / 2.0)), sample.a); - } +```rust +// `custom_phase_item.wgsl` +// +// This shader goes with the `custom_phase_item` example. It demonstrates how to +// enqueue custom rendering logic in a `RenderPhase`. - result /= f32(num_taps); +// The GPU-side vertex structure. +struct Vertex { + // The world-space position of the vertex. + @location(0) position: vec3, + // The color of the vertex. 
+ @location(1) color: vec3, +}; -#ifdef TONEMAP_IN_SHADER - result = approximate_inverse_tone_mapping(result, view_bindings::view.color_grading); -#endif +// Information passed from the vertex shader to the fragment shader. +struct VertexOutput { + // The clip-space position of the vertex. + @builtin(position) clip_position: vec4, + // The color of the vertex. + @location(0) color: vec3, +}; - return result; +// The vertex shader entry point. +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + // Use an orthographic projection. + var vertex_output: VertexOutput; + vertex_output.clip_position = vec4(vertex.position.xyz, 1.0); + vertex_output.color = vertex.color; + return vertex_output; +} + +// The fragment shader entry point. +@fragment +fn fragment(vertex_output: VertexOutput) -> @location(0) vec4 { + return vec4(vertex_output.color, 1.0); } ``` -### crates/bevy_pbr/src/render/mesh_view_bindings +### bevy_shaders/write_index_buffer ```rust -#define_import_path bevy_pbr::mesh_view_bindings - -#import bevy_pbr::mesh_view_types as types -#import bevy_render::{ - view::View, - globals::Globals, +#import bevy_pbr::meshlet_bindings::{ + meshlet_thread_meshlet_ids, + meshlets, + draw_indirect_args, + draw_index_buffer, + get_meshlet_occlusion, + get_meshlet_previous_occlusion, } -@group(0) @binding(0) var view: View; -@group(0) @binding(1) var lights: types::Lights; -#ifdef NO_CUBE_ARRAY_TEXTURES_SUPPORT -@group(0) @binding(2) var point_shadow_textures: texture_depth_cube; -#else -@group(0) @binding(2) var point_shadow_textures: texture_depth_cube_array; -#endif -@group(0) @binding(3) var point_shadow_textures_sampler: sampler_comparison; -#ifdef NO_ARRAY_TEXTURES_SUPPORT -@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d; -#else -@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d_array; -#endif -@group(0) @binding(5) var directional_shadow_textures_sampler: sampler_comparison; +var draw_index_buffer_start_workgroup: u32; + +/// This pass writes out a buffer of cluster + triangle IDs for the draw_indirect() call to rasterize each visible meshlet. 
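The pass described above emits one `u32` per visible triangle; as the kernel below shows, the triangle id occupies the low 8 bits and the cluster id the remaining upper bits. A standalone Rust sketch of that packing and its inverse (the example ids are arbitrary):

```rust
/// Packs a cluster id and a triangle id into one index-buffer entry, using the
/// layout of the compute shader below: triangle id in the low 8 bits,
/// cluster id in the remaining upper bits.
fn pack_cluster_triangle(cluster_id: u32, triangle_id: u32) -> u32 {
    debug_assert!(triangle_id < 256, "triangle id must fit in 8 bits");
    (cluster_id << 8) | triangle_id
}

/// Splits a packed entry back into (cluster id, triangle id).
fn unpack_cluster_triangle(packed: u32) -> (u32, u32) {
    (packed >> 8, packed & 0xFF)
}

fn main() {
    let packed = pack_cluster_triangle(1234, 57);
    assert_eq!(unpack_cluster_triangle(packed), (1234, 57));
    println!("cluster 1234, triangle 57 -> {packed:#010x}");
}
```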
+ +@compute +@workgroup_size(64, 1, 1) // 64 threads per workgroup, 1 workgroup per cluster, 1 thread per triangle +fn write_index_buffer(@builtin(workgroup_id) workgroup_id: vec3, @builtin(num_workgroups) num_workgroups: vec3, @builtin(local_invocation_index) triangle_id: u32) { + // Calculate the cluster ID for this workgroup + let cluster_id = dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x, num_workgroups.x, 1u)); + if cluster_id >= arrayLength(&meshlet_thread_meshlet_ids) { return; } + + // If the meshlet was culled, then we don't need to draw it + if !get_meshlet_occlusion(cluster_id) { return; } -#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 -@group(0) @binding(6) var point_lights: types::PointLights; -@group(0) @binding(7) var cluster_light_index_lists: types::ClusterLightIndexLists; -@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; -#else -@group(0) @binding(6) var point_lights: types::PointLights; -@group(0) @binding(7) var cluster_light_index_lists: types::ClusterLightIndexLists; -@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; + // If the meshlet was drawn in the first pass, and this is the second pass, then we don't need to draw it +#ifdef MESHLET_SECOND_WRITE_INDEX_BUFFER_PASS + if get_meshlet_previous_occlusion(cluster_id) { return; } #endif -@group(0) @binding(9) var globals: Globals; -@group(0) @binding(10) var fog: types::Fog; -@group(0) @binding(11) var light_probes: types::LightProbes; + let meshlet_id = meshlet_thread_meshlet_ids[cluster_id]; + let meshlet = meshlets[meshlet_id]; -@group(0) @binding(12) var screen_space_ambient_occlusion_texture: texture_2d; + // Reserve space in the buffer for this meshlet's triangles, and broadcast the start of that slice to all threads + if triangle_id == 0u { + draw_index_buffer_start_workgroup = atomicAdd(&draw_indirect_args.vertex_count, meshlet.triangle_count * 3u); + draw_index_buffer_start_workgroup /= 3u; + } + workgroupBarrier(); -#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY -@group(0) @binding(13) var diffuse_environment_maps: binding_array, 8u>; -@group(0) @binding(14) var specular_environment_maps: binding_array, 8u>; -#else -@group(0) @binding(13) var diffuse_environment_map: texture_cube; -@group(0) @binding(14) var specular_environment_map: texture_cube; -#endif -@group(0) @binding(15) var environment_map_sampler: sampler; + // Each thread writes one triangle of the meshlet to the buffer slice reserved for the meshlet + if triangle_id < meshlet.triangle_count { + draw_index_buffer[draw_index_buffer_start_workgroup + triangle_id] = (cluster_id << 8u) | triangle_id; + } +} -#ifdef IRRADIANCE_VOLUMES_ARE_USABLE -#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY -@group(0) @binding(16) var irradiance_volumes: binding_array, 8u>; -#else -@group(0) @binding(16) var irradiance_volume: texture_3d; -#endif -@group(0) @binding(17) var irradiance_volume_sampler: sampler; -#endif +``` -// NB: If you change these, make sure to update `tonemapping_shared.wgsl` too. 
-@group(0) @binding(18) var dt_lut_texture: texture_3d; -@group(0) @binding(19) var dt_lut_sampler: sampler; +### bevy_shaders/circle_shader -#ifdef MULTISAMPLED -#ifdef DEPTH_PREPASS -@group(0) @binding(20) var depth_prepass_texture: texture_depth_multisampled_2d; -#endif // DEPTH_PREPASS -#ifdef NORMAL_PREPASS -@group(0) @binding(21) var normal_prepass_texture: texture_multisampled_2d; -#endif // NORMAL_PREPASS -#ifdef MOTION_VECTOR_PREPASS -@group(0) @binding(22) var motion_vector_prepass_texture: texture_multisampled_2d; -#endif // MOTION_VECTOR_PREPASS +```rust +// This shader draws a circle with a given input color +#import bevy_ui::ui_vertex_output::UiVertexOutput -#else // MULTISAMPLED +struct CustomUiMaterial { + @location(0) color: vec4 +} -#ifdef DEPTH_PREPASS -@group(0) @binding(20) var depth_prepass_texture: texture_depth_2d; -#endif // DEPTH_PREPASS -#ifdef NORMAL_PREPASS -@group(0) @binding(21) var normal_prepass_texture: texture_2d; -#endif // NORMAL_PREPASS -#ifdef MOTION_VECTOR_PREPASS -@group(0) @binding(22) var motion_vector_prepass_texture: texture_2d; -#endif // MOTION_VECTOR_PREPASS +@group(1) @binding(0) +var input: CustomUiMaterial; -#endif // MULTISAMPLED +@fragment +fn fragment(in: UiVertexOutput) -> @location(0) vec4 { + // the UVs are now adjusted around the middle of the rect. + let uv = in.uv * 2.0 - 1.0; -#ifdef DEFERRED_PREPASS -@group(0) @binding(23) var deferred_prepass_texture: texture_2d; -#endif // DEFERRED_PREPASS + // circle alpha, the higher the power the harsher the falloff. + let alpha = 1.0 - pow(sqrt(dot(uv, uv)), 100.0); -@group(0) @binding(24) var view_transmission_texture: texture_2d; -@group(0) @binding(25) var view_transmission_sampler: sampler; + return vec4(input.color.rgb, alpha); +} ``` -### crates/bevy_pbr/src/render/view_transformations +### bevy_shaders/color_material ```rust -#define_import_path bevy_pbr::view_transformations - -#import bevy_pbr::mesh_view_bindings as view_bindings - -/// World space: -/// +y is up +#import bevy_sprite::{ + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} -/// View space: -/// -z is forward, +x is right, +y is up -/// Forward is from the camera position into the scene. -/// (0.0, 0.0, -1.0) is linear distance of 1.0 in front of the camera's view relative to the camera's rotation -/// (0.0, 1.0, 0.0) is linear distance of 1.0 above the camera's view relative to the camera's rotation +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif -/// NDC (normalized device coordinate): -/// https://www.w3.org/TR/webgpu/#coordinate-systems -/// (-1.0, -1.0) in NDC is located at the bottom-left corner of NDC -/// (1.0, 1.0) in NDC is located at the top-right corner of NDC -/// Z is depth where: -/// 1.0 is near clipping plane -/// Perspective projection: 0.0 is inf far away -/// Orthographic projection: 0.0 is far clipping plane +struct ColorMaterial { + color: vec4, + // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
+ flags: u32, +}; +const COLOR_MATERIAL_FLAGS_TEXTURE_BIT: u32 = 1u; -/// UV space: -/// 0.0, 0.0 is the top left -/// 1.0, 1.0 is the bottom right +@group(2) @binding(0) var material: ColorMaterial; +@group(2) @binding(1) var texture: texture_2d; +@group(2) @binding(2) var texture_sampler: sampler; +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + var output_color: vec4 = material.color; +#ifdef VERTEX_COLORS + output_color = output_color * mesh.color; +#endif + if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { + output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); + } +#ifdef TONEMAP_IN_SHADER + output_color = tonemapping::tone_mapping(output_color, view.color_grading); +#endif + return output_color; +} -// ----------------- -// TO WORLD -------- -// ----------------- +``` -/// Convert a view space position to world space -fn position_view_to_world(view_pos: vec3) -> vec3 { - let world_pos = view_bindings::view.view * vec4(view_pos, 1.0); - return world_pos.xyz; -} +### bevy_shaders/pbr_transmission -/// Convert a clip space position to world space -fn position_clip_to_world(clip_pos: vec4) -> vec3 { - let world_pos = view_bindings::view.inverse_view_proj * clip_pos; - return world_pos.xyz; -} +```rust +#define_import_path bevy_pbr::transmission -/// Convert a ndc space position to world space -fn position_ndc_to_world(ndc_pos: vec3) -> vec3 { - let world_pos = view_bindings::view.inverse_view_proj * vec4(ndc_pos, 1.0); - return world_pos.xyz / world_pos.w; -} +#import bevy_pbr::{ + lighting, + prepass_utils, + utils::interleaved_gradient_noise, + utils, + mesh_view_bindings as view_bindings, +}; -/// Convert a view space direction to world space -fn direction_view_to_world(view_dir: vec3) -> vec3 { - let world_dir = view_bindings::view.view * vec4(view_dir, 0.0); - return world_dir.xyz; -} +#import bevy_render::maths::PI -/// Convert a clip space direction to world space -fn direction_clip_to_world(clip_dir: vec4) -> vec3 { - let world_dir = view_bindings::view.inverse_view_proj * clip_dir; - return world_dir.xyz; -} +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping::approximate_inverse_tone_mapping +#endif -// ----------------- -// TO VIEW --------- -// ----------------- +fn specular_transmissive_light(world_position: vec4, frag_coord: vec3, view_z: f32, N: vec3, V: vec3, F0: vec3, ior: f32, thickness: f32, perceptual_roughness: f32, specular_transmissive_color: vec3, transmitted_environment_light_specular: vec3) -> vec3 { + // Calculate the ratio between refaction indexes. 
Assume air/vacuum for the space outside the mesh + let eta = 1.0 / ior; -/// Convert a world space position to view space -fn position_world_to_view(world_pos: vec3) -> vec3 { - let view_pos = view_bindings::view.inverse_view * vec4(world_pos, 1.0); - return view_pos.xyz; -} + // Calculate incidence vector (opposite to view vector) and its dot product with the mesh normal + let I = -V; + let NdotI = dot(N, I); -/// Convert a clip space position to view space -fn position_clip_to_view(clip_pos: vec4) -> vec3 { - let view_pos = view_bindings::view.inverse_projection * clip_pos; - return view_pos.xyz; -} + // Calculate refracted direction using Snell's law + let k = 1.0 - eta * eta * (1.0 - NdotI * NdotI); + let T = eta * I - (eta * NdotI + sqrt(k)) * N; -/// Convert a ndc space position to view space -fn position_ndc_to_view(ndc_pos: vec3) -> vec3 { - let view_pos = view_bindings::view.inverse_projection * vec4(ndc_pos, 1.0); - return view_pos.xyz / view_pos.w; -} + // Calculate the exit position of the refracted ray, by propagating refacted direction through thickness + let exit_position = world_position.xyz + T * thickness; -/// Convert a world space direction to view space -fn direction_world_to_view(world_dir: vec3) -> vec3 { - let view_dir = view_bindings::view.inverse_view * vec4(world_dir, 0.0); - return view_dir.xyz; -} + // Transform exit_position into clip space + let clip_exit_position = view_bindings::view.clip_from_world * vec4(exit_position, 1.0); -/// Convert a clip space direction to view space -fn direction_clip_to_view(clip_dir: vec4) -> vec3 { - let view_dir = view_bindings::view.inverse_projection * clip_dir; - return view_dir.xyz; -} + // Scale / offset position so that coordinate is in right space for sampling transmissive background texture + let offset_position = (clip_exit_position.xy / clip_exit_position.w) * vec2(0.5, -0.5) + 0.5; -// ----------------- -// TO CLIP --------- -// ----------------- + // Fetch background color + var background_color: vec4; + if perceptual_roughness == 0.0 { + // If the material has zero roughness, we can use a faster approach without the blur + background_color = fetch_transmissive_background_non_rough(offset_position, frag_coord); + } else { + background_color = fetch_transmissive_background(offset_position, frag_coord, view_z, perceptual_roughness); + } -/// Convert a world space position to clip space -fn position_world_to_clip(world_pos: vec3) -> vec4 { - let clip_pos = view_bindings::view.view_proj * vec4(world_pos, 1.0); - return clip_pos; -} + // Compensate for exposure, since the background color is coming from an already exposure-adjusted texture + background_color = vec4(background_color.rgb / view_bindings::view.exposure, background_color.a); -/// Convert a view space position to clip space -fn position_view_to_clip(view_pos: vec3) -> vec4 { - let clip_pos = view_bindings::view.projection * vec4(view_pos, 1.0); - return clip_pos; -} + // Dot product of the refracted direction with the exit normal (Note: We assume the exit normal is the entry normal but inverted) + let MinusNdotT = dot(-N, T); -/// Convert a world space direction to clip space -fn direction_world_to_clip(world_dir: vec3) -> vec4 { - let clip_dir = view_bindings::view.view_proj * vec4(world_dir, 0.0); - return clip_dir; -} + // Calculate 1.0 - fresnel factor (how much light is _NOT_ reflected, i.e. 
how much is transmitted) + let F = vec3(1.0) - lighting::fresnel(F0, MinusNdotT); -/// Convert a view space direction to clip space -fn direction_view_to_clip(view_dir: vec3) -> vec4 { - let clip_dir = view_bindings::view.projection * vec4(view_dir, 0.0); - return clip_dir; + // Calculate final color by applying fresnel multiplied specular transmissive color to a mix of background color and transmitted specular environment light + return F * specular_transmissive_color * mix(transmitted_environment_light_specular, background_color.rgb, background_color.a); } -// ----------------- -// TO NDC ---------- -// ----------------- +fn fetch_transmissive_background_non_rough(offset_position: vec2, frag_coord: vec3) -> vec4 { + var background_color = textureSampleLevel( + view_bindings::view_transmission_texture, + view_bindings::view_transmission_sampler, + offset_position, + 0.0 + ); -/// Convert a world space position to ndc space -fn position_world_to_ndc(world_pos: vec3) -> vec3 { - let ndc_pos = view_bindings::view.view_proj * vec4(world_pos, 1.0); - return ndc_pos.xyz / ndc_pos.w; -} +#ifdef DEPTH_PREPASS +#ifndef WEBGL2 + // Use depth prepass data to reject values that are in front of the current fragment + if prepass_utils::prepass_depth(vec4(offset_position * view_bindings::view.viewport.zw, 0.0, 0.0), 0u) > frag_coord.z { + background_color.a = 0.0; + } +#endif +#endif -/// Convert a view space position to ndc space -fn position_view_to_ndc(view_pos: vec3) -> vec3 { - let ndc_pos = view_bindings::view.projection * vec4(view_pos, 1.0); - return ndc_pos.xyz / ndc_pos.w; +#ifdef TONEMAP_IN_SHADER + background_color = approximate_inverse_tone_mapping(background_color, view_bindings::view.color_grading); +#endif + + return background_color; } -// ----------------- -// DEPTH ----------- -// ----------------- +fn fetch_transmissive_background(offset_position: vec2, frag_coord: vec3, view_z: f32, perceptual_roughness: f32) -> vec4 { + // Calculate view aspect ratio, used to scale offset so that it's proportionate + let aspect = view_bindings::view.viewport.z / view_bindings::view.viewport.w; -/// Retrieve the perspective camera near clipping plane -fn perspective_camera_near() -> f32 { - return view_bindings::view.projection[3][2]; -} + // Calculate how “blurry” the transmission should be. + // Blur is more or less eyeballed to look approximately “right”, since the “correct” + // approach would involve projecting many scattered rays and figuring out their individual + // exit positions. IRL, light rays can be scattered when entering/exiting a material (due to + // roughness) or inside the material (due to subsurface scattering). Here, we only consider + // the first scenario. + // + // Blur intensity is: + // - proportional to the square of `perceptual_roughness` + // - proportional to the inverse of view z + let blur_intensity = (perceptual_roughness * perceptual_roughness) / view_z; -/// Convert ndc depth to linear view z. 
-/// Note: Depth values in front of the camera will be negative as -z is forward -fn depth_ndc_to_view_z(ndc_depth: f32) -> f32 { -#ifdef VIEW_PROJECTION_PERSPECTIVE - return -perspective_camera_near() / ndc_depth; -#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC - return -(view_bindings::view.projection[3][2] - ndc_depth) / view_bindings::view.projection[2][2]; +#ifdef SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS + let num_taps = #{SCREEN_SPACE_SPECULAR_TRANSMISSION_BLUR_TAPS}; // Controlled by the `Camera3d::screen_space_specular_transmission_quality` property #else - let view_pos = view_bindings::view.inverse_projection * vec4(0.0, 0.0, ndc_depth, 1.0); - return view_pos.z / view_pos.w; + let num_taps = 8; // Fallback to 8 taps, if not specified #endif -} - -/// Convert linear view z to ndc depth. -/// Note: View z input should be negative for values in front of the camera as -z is forward -fn view_z_to_depth_ndc(view_z: f32) -> f32 { -#ifdef VIEW_PROJECTION_PERSPECTIVE - return -perspective_camera_near() / view_z; -#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC - return view_bindings::view.projection[3][2] + view_z * view_bindings::view.projection[2][2]; + let num_spirals = i32(ceil(f32(num_taps) / 8.0)); +#ifdef TEMPORAL_JITTER + let random_angle = interleaved_gradient_noise(frag_coord.xy, view_bindings::globals.frame_count); #else - let ndc_pos = view_bindings::view.projection * vec4(0.0, 0.0, view_z, 1.0); - return ndc_pos.z / ndc_pos.w; + let random_angle = interleaved_gradient_noise(frag_coord.xy, 0u); #endif -} - -// ----------------- -// UV -------------- -// ----------------- + // Pixel checkerboard pattern (helps make the interleaved gradient noise pattern less visible) + let pixel_checkboard = ( +#ifdef TEMPORAL_JITTER + // 0 or 1 on even/odd pixels, alternates every frame + (i32(frag_coord.x) + i32(frag_coord.y) + i32(view_bindings::globals.frame_count)) % 2 +#else + // 0 or 1 on even/odd pixels + (i32(frag_coord.x) + i32(frag_coord.y)) % 2 +#endif + ); -/// Convert ndc space xy coordinate [-1.0 .. 1.0] to uv [0.0 .. 1.0] -fn ndc_to_uv(ndc: vec2) -> vec2 { - return ndc * vec2(0.5, -0.5) + vec2(0.5); -} + var result = vec4(0.0); + for (var i: i32 = 0; i < num_taps; i = i + 1) { + let current_spiral = (i >> 3u); + let angle = (random_angle + f32(current_spiral) / f32(num_spirals)) * 2.0 * PI; + let m = vec2(sin(angle), cos(angle)); + let rotation_matrix = mat2x2( + m.y, -m.x, + m.x, m.y + ); -/// Convert uv [0.0 .. 1.0] coordinate to ndc space xy [-1.0 .. 1.0] -fn uv_to_ndc(uv: vec2) -> vec2 { - return uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0); -} + // Get spiral offset + var spiral_offset: vec2; + switch i & 7 { + // https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare (slides 120-135) + // TODO: Figure out a more reasonable way of doing this, as WGSL + // seems to only allow constant indexes into constant arrays at the moment. + // The downstream shader compiler should be able to optimize this into a single + // constant when unrolling the for loop, but it's still not ideal. 
+ case 0: { spiral_offset = utils::SPIRAL_OFFSET_0_; } // Note: We go even first and then odd, so that the lowest + case 1: { spiral_offset = utils::SPIRAL_OFFSET_2_; } // quality possible (which does 4 taps) still does a full spiral + case 2: { spiral_offset = utils::SPIRAL_OFFSET_4_; } // instead of just the first half of it + case 3: { spiral_offset = utils::SPIRAL_OFFSET_6_; } + case 4: { spiral_offset = utils::SPIRAL_OFFSET_1_; } + case 5: { spiral_offset = utils::SPIRAL_OFFSET_3_; } + case 6: { spiral_offset = utils::SPIRAL_OFFSET_5_; } + case 7: { spiral_offset = utils::SPIRAL_OFFSET_7_; } + default: {} + } -/// returns the (0.0, 0.0) .. (1.0, 1.0) position within the viewport for the current render target -/// [0 .. render target viewport size] eg. [(0.0, 0.0) .. (1280.0, 720.0)] to [(0.0, 0.0) .. (1.0, 1.0)] -fn frag_coord_to_uv(frag_coord: vec2) -> vec2 { - return (frag_coord - view_bindings::view.viewport.xy) / view_bindings::view.viewport.zw; -} + // Make each consecutive spiral slightly smaller than the previous one + spiral_offset *= 1.0 - (0.5 * f32(current_spiral + 1) / f32(num_spirals)); -/// Convert frag coord to ndc -fn frag_coord_to_ndc(frag_coord: vec4) -> vec3 { - return vec3(uv_to_ndc(frag_coord_to_uv(frag_coord.xy)), frag_coord.z); -} + // Rotate and correct for aspect ratio + let rotated_spiral_offset = (rotation_matrix * spiral_offset) * vec2(1.0, aspect); -``` + // Calculate final offset position, with blur and spiral offset + let modified_offset_position = offset_position + rotated_spiral_offset * blur_intensity * (1.0 - f32(pixel_checkboard) * 0.1); -### crates/bevy_pbr/src/render/skinning + // Sample the view transmission texture at the offset position + noise offset, to get the background color + var sample = textureSampleLevel( + view_bindings::view_transmission_texture, + view_bindings::view_transmission_sampler, + modified_offset_position, + 0.0 + ); -```rust -#define_import_path bevy_pbr::skinning +#ifdef DEPTH_PREPASS +#ifndef WEBGL2 + // Use depth prepass data to reject values that are in front of the current fragment + if prepass_utils::prepass_depth(vec4(modified_offset_position * view_bindings::view.viewport.zw, 0.0, 0.0), 0u) > frag_coord.z { + sample = vec4(0.0); + } +#endif +#endif -#import bevy_pbr::mesh_types::SkinnedMesh + // As blur intensity grows higher, gradually limit *very bright* color RGB values towards a + // maximum length of 1.0 to prevent stray “firefly” pixel artifacts. This can potentially make + // very strong emissive meshes appear much dimmer, but the artifacts are noticeable enough to + // warrant this treatment. 
+ let normalized_rgb = normalize(sample.rgb); + result += vec4(min(sample.rgb, normalized_rgb / saturate(blur_intensity / 2.0)), sample.a); + } -#ifdef SKINNED + result /= f32(num_taps); -@group(1) @binding(1) var joint_matrices: SkinnedMesh; +#ifdef TONEMAP_IN_SHADER + result = approximate_inverse_tone_mapping(result, view_bindings::view.color_grading); +#endif -fn skin_model( - indexes: vec4, - weights: vec4, -) -> mat4x4 { - return weights.x * joint_matrices.data[indexes.x] - + weights.y * joint_matrices.data[indexes.y] - + weights.z * joint_matrices.data[indexes.z] - + weights.w * joint_matrices.data[indexes.w]; + return result; } -fn inverse_transpose_3x3m(in: mat3x3) -> mat3x3 { - let x = cross(in[1], in[2]); - let y = cross(in[2], in[0]); - let z = cross(in[0], in[1]); - let det = dot(in[2], z); - return mat3x3( - x / det, - y / det, - z / det - ); -} +``` -fn skin_normals( - model: mat4x4, - normal: vec3, -) -> vec3 { - return normalize( - inverse_transpose_3x3m( - mat3x3( - model[0].xyz, - model[1].xyz, - model[2].xyz - ) - ) * normal - ); -} +### bevy_shaders/rgb9e5 -#endif +```rust +#define_import_path bevy_pbr::rgb9e5 -``` +const RGB9E5_EXPONENT_BITS = 5u; +const RGB9E5_MANTISSA_BITS = 9; +const RGB9E5_MANTISSA_BITSU = 9u; +const RGB9E5_EXP_BIAS = 15; +const RGB9E5_MAX_VALID_BIASED_EXP = 31u; + +//#define MAX_RGB9E5_EXP (RGB9E5_MAX_VALID_BIASED_EXP - RGB9E5_EXP_BIAS) +//#define RGB9E5_MANTISSA_VALUES (1< i32 { + let f = bitcast(x); + let biasedexponent = (f & 0x7F800000u) >> 23u; + return i32(biasedexponent) - 127; +} -#import bevy_pbr::mesh_view_bindings as view_bindings +// https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_shared_exponent.txt +fn vec3_to_rgb9e5_(rgb_in: vec3) -> u32 { + let rgb = clamp(rgb_in, vec3(0.0), vec3(MAX_RGB9E5_)); -#ifdef DEPTH_PREPASS -fn prepass_depth(frag_coord: vec4, sample_index: u32) -> f32 { -#ifdef MULTISAMPLED - return textureLoad(view_bindings::depth_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); -#else // MULTISAMPLED - return textureLoad(view_bindings::depth_prepass_texture, vec2(frag_coord.xy), 0); -#endif // MULTISAMPLED + let maxrgb = max(rgb.r, max(rgb.g, rgb.b)); + var exp_shared = max(-RGB9E5_EXP_BIAS - 1, floor_log2_(maxrgb)) + 1 + RGB9E5_EXP_BIAS; + var denom = exp2(f32(exp_shared - RGB9E5_EXP_BIAS - RGB9E5_MANTISSA_BITS)); + + let maxm = i32(floor(maxrgb / denom + 0.5)); + if (maxm == RGB9E5_MANTISSA_VALUES) { + denom *= 2.0; + exp_shared += 1; + } + + let n = vec3(floor(rgb / denom + 0.5)); + + return (u32(exp_shared) << 27u) | (n.b << 18u) | (n.g << 9u) | (n.r << 0u); } -#endif // DEPTH_PREPASS -#ifdef NORMAL_PREPASS -fn prepass_normal(frag_coord: vec4, sample_index: u32) -> vec3 { -#ifdef MULTISAMPLED - let normal_sample = textureLoad(view_bindings::normal_prepass_texture, vec2(frag_coord.xy), i32(sample_index)); -#else - let normal_sample = textureLoad(view_bindings::normal_prepass_texture, vec2(frag_coord.xy), 0); -#endif // MULTISAMPLED - return normalize(normal_sample.xyz * 2.0 - vec3(1.0)); +// Builtin extractBits() is not working on WEBGL or DX12 +// DX12: HLSL: Unimplemented("write_expr_math ExtractBits") +fn extract_bits(value: u32, offset: u32, bits: u32) -> u32 { + let mask = (1u << bits) - 1u; + return (value >> offset) & mask; } -#endif // NORMAL_PREPASS -#ifdef MOTION_VECTOR_PREPASS -fn prepass_motion_vector(frag_coord: vec4, sample_index: u32) -> vec2 { -#ifdef MULTISAMPLED - let motion_vector_sample = textureLoad(view_bindings::motion_vector_prepass_texture, vec2(frag_coord.xy), 
i32(sample_index)); -#else - let motion_vector_sample = textureLoad(view_bindings::motion_vector_prepass_texture, vec2(frag_coord.xy), 0); -#endif - return motion_vector_sample.rg; +fn rgb9e5_to_vec3_(v: u32) -> vec3 { + let exponent = i32(extract_bits(v, 27u, RGB9E5_EXPONENT_BITS)) - RGB9E5_EXP_BIAS - RGB9E5_MANTISSA_BITS; + let scale = exp2(f32(exponent)); + + return vec3( + f32(extract_bits(v, 0u, RGB9E5_MANTISSA_BITSU)), + f32(extract_bits(v, 9u, RGB9E5_MANTISSA_BITSU)), + f32(extract_bits(v, 18u, RGB9E5_MANTISSA_BITSU)) + ) * scale; } -#endif // MOTION_VECTOR_PREPASS ``` -### crates/bevy_pbr/src/prepass/prepass_bindings +### bevy_shaders/cull_meshlets ```rust -#define_import_path bevy_pbr::prepass_bindings - -struct PreviousViewUniforms { - inverse_view: mat4x4, - view_proj: mat4x4, +#import bevy_pbr::meshlet_bindings::{ + meshlet_thread_meshlet_ids, + meshlet_bounding_spheres, + meshlet_thread_instance_ids, + meshlet_instance_uniforms, + meshlet_occlusion, + view, + should_cull_instance, + get_meshlet_previous_occlusion, } +#ifdef MESHLET_SECOND_CULLING_PASS +#import bevy_pbr::meshlet_bindings::depth_pyramid +#endif +#import bevy_render::maths::affine3_to_square -#ifdef MOTION_VECTOR_PREPASS -@group(0) @binding(2) var previous_view_uniforms: PreviousViewUniforms; -#endif // MOTION_VECTOR_PREPASS +/// Culls individual clusters (1 per thread) in two passes (two pass occlusion culling), and outputs a bitmask of which clusters survived. +/// 1. The first pass is only frustum culling, on only the clusters that were visible last frame. +/// 2. The second pass performs both frustum and occlusion culling (using the depth buffer generated from the first pass), on all clusters. -// Material bindings will be in @group(2) +@compute +@workgroup_size(128, 1, 1) // 128 threads per workgroup, 1 instanced meshlet per thread +fn cull_meshlets(@builtin(global_invocation_id) cluster_id: vec3) { + // Fetch the instanced meshlet data + if cluster_id.x >= arrayLength(&meshlet_thread_meshlet_ids) { return; } + let instance_id = meshlet_thread_instance_ids[cluster_id.x]; + if should_cull_instance(instance_id) { + return; + } + let meshlet_id = meshlet_thread_meshlet_ids[cluster_id.x]; + let bounding_sphere = meshlet_bounding_spheres[meshlet_id]; + let instance_uniform = meshlet_instance_uniforms[instance_id]; + let model = affine3_to_square(instance_uniform.model); + let model_scale = max(length(model[0]), max(length(model[1]), length(model[2]))); + let bounding_sphere_center = model * vec4(bounding_sphere.center, 1.0); + let bounding_sphere_radius = model_scale * bounding_sphere.radius; -``` + // In the first pass, operate only on the clusters visible last frame. In the second pass, operate on all clusters. 
+#ifdef MESHLET_SECOND_CULLING_PASS + var meshlet_visible = true; +#else + var meshlet_visible = get_meshlet_previous_occlusion(cluster_id.x); + if !meshlet_visible { return; } +#endif -### crates/bevy_pbr/src/prepass/prepass + // Frustum culling + // TODO: Faster method from https://vkguide.dev/docs/gpudriven/compute_culling/#frustum-culling-function + for (var i = 0u; i < 6u; i++) { + if !meshlet_visible { break; } + meshlet_visible &= dot(view.frustum[i], bounding_sphere_center) > -bounding_sphere_radius; + } -```rust -#import bevy_pbr::{ - prepass_bindings, - mesh_functions, - prepass_io::{Vertex, VertexOutput, FragmentOutput}, - skinning, - morph, - mesh_view_bindings::view, -} +#ifdef MESHLET_SECOND_CULLING_PASS + // In the second culling pass, cull against the depth pyramid generated from the first pass + if meshlet_visible { + let bounding_sphere_center_view_space = (view.inverse_view * vec4(bounding_sphere_center.xyz, 1.0)).xyz; + let aabb = project_view_space_sphere_to_screen_space_aabb(bounding_sphere_center_view_space, bounding_sphere_radius); -#ifdef DEFERRED_PREPASS -#import bevy_pbr::rgb9e5 -#endif + // Halve the AABB size because the first depth mip resampling pass cut the full screen resolution into a power of two conservatively + let depth_pyramid_size_mip_0 = vec2(textureDimensions(depth_pyramid, 0)) * 0.5; + let width = (aabb.z - aabb.x) * depth_pyramid_size_mip_0.x; + let height = (aabb.w - aabb.y) * depth_pyramid_size_mip_0.y; + let depth_level = max(0, i32(ceil(log2(max(width, height))))); // TODO: Naga doesn't like this being a u32 + let depth_pyramid_size = vec2(textureDimensions(depth_pyramid, depth_level)); + let aabb_top_left = vec2(aabb.xy * depth_pyramid_size); -#ifdef MORPH_TARGETS -fn morph_vertex(vertex_in: Vertex) -> Vertex { - var vertex = vertex_in; - let weight_count = morph::layer_count(); - for (var i: u32 = 0u; i < weight_count; i ++) { - let weight = morph::weight_at(i); - if weight == 0.0 { - continue; + let depth_quad_a = textureLoad(depth_pyramid, aabb_top_left, depth_level).x; + let depth_quad_b = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 0u), depth_level).x; + let depth_quad_c = textureLoad(depth_pyramid, aabb_top_left + vec2(0u, 1u), depth_level).x; + let depth_quad_d = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 1u), depth_level).x; + + let occluder_depth = min(min(depth_quad_a, depth_quad_b), min(depth_quad_c, depth_quad_d)); + if view.projection[3][3] == 1.0 { + // Orthographic + let sphere_depth = view.projection[3][2] + (bounding_sphere_center_view_space.z + bounding_sphere_radius) * view.projection[2][2]; + meshlet_visible &= sphere_depth >= occluder_depth; + } else { + // Perspective + let sphere_depth = -view.projection[3][2] / (bounding_sphere_center_view_space.z + bounding_sphere_radius); + meshlet_visible &= sphere_depth >= occluder_depth; } - vertex.position += weight * morph::morph(vertex.index, morph::position_offset, i); -#ifdef VERTEX_NORMALS - vertex.normal += weight * morph::morph(vertex.index, morph::normal_offset, i); -#endif -#ifdef VERTEX_TANGENTS - vertex.tangent += vec4(weight * morph::morph(vertex.index, morph::tangent_offset, i), 0.0); -#endif } - return vertex; -} #endif -@vertex -fn vertex(vertex_no_morph: Vertex) -> VertexOutput { - var out: VertexOutput; + // Write the bitmask of whether or not the cluster was culled + let occlusion_bit = u32(meshlet_visible) << (cluster_id.x % 32u); + atomicOr(&meshlet_occlusion[cluster_id.x / 32u], occlusion_bit); +} -#ifdef MORPH_TARGETS - var vertex = 
morph_vertex(vertex_no_morph); -#else - var vertex = vertex_no_morph; -#endif +// https://zeux.io/2023/01/12/approximate-projected-bounds +fn project_view_space_sphere_to_screen_space_aabb(cp: vec3, r: f32) -> vec4 { + let inv_width = view.projection[0][0] * 0.5; + let inv_height = view.projection[1][1] * 0.5; + if view.projection[3][3] == 1.0 { + // Orthographic + let min_x = cp.x - r; + let max_x = cp.x + r; -#ifdef SKINNED - var model = skinning::skin_model(vertex.joint_indices, vertex.joint_weights); -#else // SKINNED - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 - var model = mesh_functions::get_model_matrix(vertex_no_morph.instance_index); -#endif // SKINNED + let min_y = cp.y - r; + let max_y = cp.y + r; - out.position = mesh_functions::mesh_position_local_to_clip(model, vec4(vertex.position, 1.0)); -#ifdef DEPTH_CLAMP_ORTHO - out.clip_position_unclamped = out.position; - out.position.z = min(out.position.z, 1.0); -#endif // DEPTH_CLAMP_ORTHO + return vec4(min_x * inv_width, 1.0 - max_y * inv_height, max_x * inv_width, 1.0 - min_y * inv_height); + } else { + // Perspective + let c = vec3(cp.xy, -cp.z); + let cr = c * r; + let czr2 = c.z * c.z - r * r; -#ifdef VERTEX_UVS - out.uv = vertex.uv; -#endif // VERTEX_UVS + let vx = sqrt(c.x * c.x + czr2); + let min_x = (vx * c.x - cr.z) / (vx * c.z + cr.x); + let max_x = (vx * c.x + cr.z) / (vx * c.z - cr.x); -#ifdef VERTEX_UVS_B - out.uv_b = vertex.uv_b; -#endif // VERTEX_UVS_B + let vy = sqrt(c.y * c.y + czr2); + let min_y = (vy * c.y - cr.z) / (vy * c.z + cr.y); + let max_y = (vy * c.y + cr.z) / (vy * c.z - cr.y); -#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS -#ifdef SKINNED - out.world_normal = skinning::skin_normals(model, vertex.normal); -#else // SKINNED - out.world_normal = mesh_functions::mesh_normal_local_to_world( - vertex.normal, - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 - vertex_no_morph.instance_index - ); -#endif // SKINNED + return vec4(min_x * inv_width, -max_y * inv_height, max_x * inv_width, -min_y * inv_height) + vec4(0.5); + } +} -#ifdef VERTEX_TANGENTS - out.world_tangent = mesh_functions::mesh_tangent_local_to_world( - model, - vertex.tangent, - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 - vertex_no_morph.instance_index - ); -#endif // VERTEX_TANGENTS -#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS +``` -#ifdef VERTEX_COLORS - out.color = vertex.color; -#endif +### bevy_shaders/clustered_forward - out.world_position = mesh_functions::mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); +```rust +#define_import_path bevy_pbr::clustered_forward -#ifdef MOTION_VECTOR_PREPASS - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. - // See https://github.com/gfx-rs/naga/issues/2416 - out.previous_world_position = mesh_functions::mesh_position_local_to_world( - mesh_functions::get_previous_model_matrix(vertex_no_morph.instance_index), - vec4(vertex.position, 1.0) - ); -#endif // MOTION_VECTOR_PREPASS +#import bevy_pbr::{ + mesh_view_bindings as bindings, + utils::rand_f, +} -#ifdef VERTEX_OUTPUT_INSTANCE_INDEX - // Use vertex_no_morph.instance_index instead of vertex.instance_index to work around a wgpu dx12 bug. 
- // See https://github.com/gfx-rs/naga/issues/2416 - out.instance_index = vertex_no_morph.instance_index; -#endif +#import bevy_render::{ + color_operations::hsv_to_rgb, + maths::PI_2, +} - return out; +// NOTE: Keep in sync with bevy_pbr/src/light.rs +fn view_z_to_z_slice(view_z: f32, is_orthographic: bool) -> u32 { + var z_slice: u32 = 0u; + if is_orthographic { + // NOTE: view_z is correct in the orthographic case + z_slice = u32(floor((view_z - bindings::lights.cluster_factors.z) * bindings::lights.cluster_factors.w)); + } else { + // NOTE: had to use -view_z to make it positive else log(negative) is nan + z_slice = u32(log(-view_z) * bindings::lights.cluster_factors.z - bindings::lights.cluster_factors.w + 1.0); + } + // NOTE: We use min as we may limit the far z plane used for clustering to be closer than + // the furthest thing being drawn. This means that we need to limit to the maximum cluster. + return min(z_slice, bindings::lights.cluster_dimensions.z - 1u); } -#ifdef PREPASS_FRAGMENT -@fragment -fn fragment(in: VertexOutput) -> FragmentOutput { - var out: FragmentOutput; +fn fragment_cluster_index(frag_coord: vec2, view_z: f32, is_orthographic: bool) -> u32 { + let xy = vec2(floor((frag_coord - bindings::view.viewport.xy) * bindings::lights.cluster_factors.xy)); + let z_slice = view_z_to_z_slice(view_z, is_orthographic); + // NOTE: Restricting cluster index to avoid undefined behavior when accessing uniform buffer + // arrays based on the cluster index. + return min( + (xy.y * bindings::lights.cluster_dimensions.x + xy.x) * bindings::lights.cluster_dimensions.z + z_slice, + bindings::lights.cluster_dimensions.w - 1u + ); +} -#ifdef NORMAL_PREPASS - out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); +// this must match CLUSTER_COUNT_SIZE in light.rs +const CLUSTER_COUNT_SIZE = 9u; +fn unpack_offset_and_counts(cluster_index: u32) -> vec3 { +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 + return bindings::cluster_offsets_and_counts.data[cluster_index].xyz; +#else + let offset_and_counts = bindings::cluster_offsets_and_counts.data[cluster_index >> 2u][cluster_index & ((1u << 2u) - 1u)]; + // [ 31 .. 18 | 17 .. 9 | 8 .. 
0 ] + // [ offset | point light count | spot light count ] + return vec3( + (offset_and_counts >> (CLUSTER_COUNT_SIZE * 2u)) & ((1u << (32u - (CLUSTER_COUNT_SIZE * 2u))) - 1u), + (offset_and_counts >> CLUSTER_COUNT_SIZE) & ((1u << CLUSTER_COUNT_SIZE) - 1u), + offset_and_counts & ((1u << CLUSTER_COUNT_SIZE) - 1u), + ); #endif +} -#ifdef DEPTH_CLAMP_ORTHO - out.frag_depth = in.clip_position_unclamped.z; -#endif // DEPTH_CLAMP_ORTHO +fn get_clusterable_object_id(index: u32) -> u32 { +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 + return bindings::clusterable_object_index_lists.data[index]; +#else + // The index is correct but in clusterable_object_index_lists we pack 4 u8s into a u32 + // This means the index into clusterable_object_index_lists is index / 4 + let indices = bindings::clusterable_object_index_lists.data[index >> 4u][(index >> 2u) & + ((1u << 2u) - 1u)]; + // And index % 4 gives the sub-index of the u8 within the u32 so we shift by 8 * sub-index + return (indices >> (8u * (index & ((1u << 2u) - 1u)))) & ((1u << 8u) - 1u); +#endif +} -#ifdef MOTION_VECTOR_PREPASS - let clip_position_t = view.unjittered_view_proj * in.world_position; - let clip_position = clip_position_t.xy / clip_position_t.w; - let previous_clip_position_t = prepass_bindings::previous_view_uniforms.view_proj * in.previous_world_position; - let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; - // These motion vectors are used as offsets to UV positions and are stored - // in the range -1,1 to allow offsetting from the one corner to the - // diagonally-opposite corner in UV coordinates, in either direction. - // A difference between diagonally-opposite corners of clip space is in the - // range -2,2, so this needs to be scaled by 0.5. And the V direction goes - // down where clip space y goes up, so y needs to be flipped. - out.motion_vector = (clip_position - previous_clip_position) * vec2(0.5, -0.5); -#endif // MOTION_VECTOR_PREPASS +fn cluster_debug_visualization( + input_color: vec4, + view_z: f32, + is_orthographic: bool, + offset_and_counts: vec3, + cluster_index: u32, +) -> vec4 { + var output_color = input_color; -#ifdef DEFERRED_PREPASS - // There isn't any material info available for this default prepass shader so we are just writing  - // emissive magenta out to the deferred gbuffer to be rendered by the first deferred lighting pass layer. - // This is here so if the default prepass fragment is used for deferred magenta will be rendered, and also - // as an example to show that a user could write to the deferred gbuffer if they were to start from this shader. 
- out.deferred = vec4(0u, bevy_pbr::rgb9e5::vec3_to_rgb9e5_(vec3(1.0, 0.0, 1.0)), 0u, 0u); - out.deferred_lighting_pass_id = 1u; -#endif + // Cluster allocation debug (using 'over' alpha blending) +#ifdef CLUSTERED_FORWARD_DEBUG_Z_SLICES + // NOTE: This debug mode visualises the z-slices + let cluster_overlay_alpha = 0.1; + var z_slice: u32 = view_z_to_z_slice(view_z, is_orthographic); + // A hack to make the colors alternate a bit more + if (z_slice & 1u) == 1u { + z_slice = z_slice + bindings::lights.cluster_dimensions.z / 2u; + } + let slice_color = hsv_to_rgb( + f32(z_slice) / f32(bindings::lights.cluster_dimensions.z + 1u) * PI_2, + 1.0, + 0.5 + ); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * slice_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_Z_SLICES +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COMPLEXITY + // NOTE: This debug mode visualises the number of clusterable objects within + // the cluster that contains the fragment. It shows a sort of cluster + // complexity measure. + let cluster_overlay_alpha = 0.1; + let max_complexity_per_cluster = 64.0; + output_color.r = (1.0 - cluster_overlay_alpha) * output_color.r + cluster_overlay_alpha * + smoothStep( + 0.0, + max_complexity_per_cluster, + f32(offset_and_counts[1] + offset_and_counts[2])); + output_color.g = (1.0 - cluster_overlay_alpha) * output_color.g + cluster_overlay_alpha * + (1.0 - smoothStep( + 0.0, + max_complexity_per_cluster, + f32(offset_and_counts[1] + offset_and_counts[2]))); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COMPLEXITY +#ifdef CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY + // NOTE: Visualizes the cluster to which the fragment belongs + let cluster_overlay_alpha = 0.1; + var rng = cluster_index; + let cluster_color = hsv_to_rgb(rand_f(&rng) * PI_2, 1.0, 0.5); + output_color = vec4( + (1.0 - cluster_overlay_alpha) * output_color.rgb + cluster_overlay_alpha * cluster_color, + output_color.a + ); +#endif // CLUSTERED_FORWARD_DEBUG_CLUSTER_COHERENCY - return out; + return output_color; } -#endif // PREPASS_FRAGMENT ``` -### crates/bevy_pbr/src/prepass/prepass_io +### bevy_shaders/mesh2d ```rust -#define_import_path bevy_pbr::prepass_io +#import bevy_sprite::{ + mesh2d_functions as mesh_functions, + mesh2d_vertex_output::VertexOutput, + mesh2d_view_bindings::view, +} + +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping +#endif -// Most of these attributes are not used in the default prepass fragment shader, but they are still needed so we can -// pass them to custom prepass shaders like pbr_prepass.wgsl. 
struct Vertex { @builtin(instance_index) instance_index: u32, +#ifdef VERTEX_POSITIONS @location(0) position: vec3, - -#ifdef VERTEX_UVS - @location(1) uv: vec2, #endif - -#ifdef VERTEX_UVS_B - @location(2) uv_b: vec2, +#ifdef VERTEX_NORMALS + @location(1) normal: vec3, #endif - -#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS - @location(3) normal: vec3, -#ifdef VERTEX_TANGENTS - @location(4) tangent: vec4, +#ifdef VERTEX_UVS + @location(2) uv: vec2, #endif -#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS - -#ifdef SKINNED - @location(5) joint_indices: vec4, - @location(6) joint_weights: vec4, +#ifdef VERTEX_TANGENTS + @location(3) tangent: vec4, #endif - #ifdef VERTEX_COLORS - @location(7) color: vec4, + @location(4) color: vec4, #endif +}; -#ifdef MORPH_TARGETS - @builtin(vertex_index) index: u32, -#endif // MORPH_TARGETS -} - -struct VertexOutput { - // This is `clip position` when the struct is used as a vertex stage output - // and `frag coord` when used as a fragment stage input - @builtin(position) position: vec4, - +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; #ifdef VERTEX_UVS - @location(0) uv: vec2, -#endif - -#ifdef VERTEX_UVS_B - @location(1) uv_b: vec2, + out.uv = vertex.uv; #endif -#ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS - @location(2) world_normal: vec3, -#ifdef VERTEX_TANGENTS - @location(3) world_tangent: vec4, +#ifdef VERTEX_POSITIONS + var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); + out.world_position = mesh_functions::mesh2d_position_local_to_world( + world_from_local, + vec4(vertex.position, 1.0) + ); + out.position = mesh_functions::mesh2d_position_world_to_clip(out.world_position); #endif -#endif // NORMAL_PREPASS_OR_DEFERRED_PREPASS - @location(4) world_position: vec4, -#ifdef MOTION_VECTOR_PREPASS - @location(5) previous_world_position: vec4, +#ifdef VERTEX_NORMALS + out.world_normal = mesh_functions::mesh2d_normal_local_to_world(vertex.normal, vertex.instance_index); #endif -#ifdef DEPTH_CLAMP_ORTHO - @location(6) clip_position_unclamped: vec4, -#endif // DEPTH_CLAMP_ORTHO -#ifdef VERTEX_OUTPUT_INSTANCE_INDEX - @location(7) instance_index: u32, +#ifdef VERTEX_TANGENTS + out.world_tangent = mesh_functions::mesh2d_tangent_local_to_world( + world_from_local, + vertex.tangent + ); #endif #ifdef VERTEX_COLORS - @location(8) color: vec4, + out.color = vertex.color; #endif + return out; } -#ifdef PREPASS_FRAGMENT -struct FragmentOutput { -#ifdef NORMAL_PREPASS - @location(0) normal: vec4, -#endif - -#ifdef MOTION_VECTOR_PREPASS - @location(1) motion_vector: vec2, +@fragment +fn fragment( + in: VertexOutput, +) -> @location(0) vec4 { +#ifdef VERTEX_COLORS + var color = in.color; +#ifdef TONEMAP_IN_SHADER + color = tonemapping::tone_mapping(color, view.color_grading); #endif - -#ifdef DEFERRED_PREPASS - @location(2) deferred: vec4, - @location(3) deferred_lighting_pass_id: u32, + return color; +#else + return vec4(1.0, 0.0, 1.0, 1.0); #endif - -#ifdef DEPTH_CLAMP_ORTHO - @builtin(frag_depth) frag_depth: f32, -#endif // DEPTH_CLAMP_ORTHO } -#endif //PREPASS_FRAGMENT ``` -### crates/bevy_pbr/src/meshlet/copy_material_depth +### bevy_shaders/instance_index ```rust -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput - -@group(0) @binding(0) var material_depth: texture_2d; +#define_import_path bevy_render::instance_index -/// This pass copies the R16Uint material depth texture to an actual Depth16Unorm depth texture. 
+#ifdef BASE_INSTANCE_WORKAROUND +// naga and wgpu should polyfill WGSL instance_index functionality where it is +// not available in GLSL. Until that is done, we can work around it in bevy +// using a push constant which is converted to a uniform by naga and wgpu. +// https://github.com/gfx-rs/wgpu/issues/1573 +var base_instance: i32; -@fragment -fn copy_material_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { - return f32(textureLoad(material_depth, vec2(in.position.xy), 0).r) / 65535.0; +fn get_instance_index(instance_index: u32) -> u32 { + return u32(base_instance) + instance_index; } +#else +fn get_instance_index(instance_index: u32) -> u32 { + return instance_index; +} +#endif ``` -### crates/bevy_pbr/src/meshlet/write_index_buffer +### bevy_shaders/tonemapping ```rust -#import bevy_pbr::meshlet_bindings::{ - meshlet_thread_meshlet_ids, - meshlets, - draw_indirect_args, - draw_index_buffer, - get_meshlet_occlusion, - get_meshlet_previous_occlusion, +#define TONEMAPPING_PASS + +#import bevy_render::{ + view::View, + maths::powsafe, +} +#import bevy_core_pipeline::{ + fullscreen_vertex_shader::FullscreenVertexOutput, + tonemapping::{tone_mapping, screen_space_dither}, } -var draw_index_buffer_start_workgroup: u32; +@group(0) @binding(0) var view: View; -/// This pass writes out a buffer of cluster + triangle IDs for the draw_indirect() call to rasterize each visible meshlet. +@group(0) @binding(1) var hdr_texture: texture_2d; +@group(0) @binding(2) var hdr_sampler: sampler; +@group(0) @binding(3) var dt_lut_texture: texture_3d; +@group(0) @binding(4) var dt_lut_sampler: sampler; -@compute -@workgroup_size(64, 1, 1) // 64 threads per workgroup, 1 workgroup per cluster, 1 thread per triangle -fn write_index_buffer(@builtin(workgroup_id) workgroup_id: vec3, @builtin(num_workgroups) num_workgroups: vec3, @builtin(local_invocation_index) triangle_id: u32) { - // Calculate the cluster ID for this workgroup - let cluster_id = dot(workgroup_id, vec3(num_workgroups.x * num_workgroups.x, num_workgroups.x, 1u)); - if cluster_id >= arrayLength(&meshlet_thread_meshlet_ids) { return; } +@fragment +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + let hdr_color = textureSample(hdr_texture, hdr_sampler, in.uv); - // If the meshlet was culled, then we don't need to draw it - if !get_meshlet_occlusion(cluster_id) { return; } + var output_rgb = tone_mapping(hdr_color, view.color_grading).rgb; - // If the meshlet was drawn in the first pass, and this is the second pass, then we don't need to draw it -#ifdef MESHLET_SECOND_WRITE_INDEX_BUFFER_PASS - if get_meshlet_previous_occlusion(cluster_id) { return; } +#ifdef DEBAND_DITHER + output_rgb = powsafe(output_rgb.rgb, 1.0 / 2.2); + output_rgb = output_rgb + screen_space_dither(in.position.xy); + // This conversion back to linear space is required because our output texture format is + // SRGB; the GPU will assume our output is linear and will apply an SRGB conversion. 
+ output_rgb = powsafe(output_rgb.rgb, 2.2); #endif - let meshlet_id = meshlet_thread_meshlet_ids[cluster_id]; - let meshlet = meshlets[meshlet_id]; + return vec4(output_rgb, hdr_color.a); +} - // Reserve space in the buffer for this meshlet's triangles, and broadcast the start of that slice to all threads - if triangle_id == 0u { - draw_index_buffer_start_workgroup = atomicAdd(&draw_indirect_args.vertex_count, meshlet.triangle_count * 3u); - draw_index_buffer_start_workgroup /= 3u; +``` + +### bevy_shaders/color_operations + +```rust +#define_import_path bevy_render::color_operations + +#import bevy_render::maths::FRAC_PI_3 + +// Converts HSV to RGB. +// +// Input: H ∈ [0, 2π), S ∈ [0, 1], V ∈ [0, 1]. +// Output: R ∈ [0, 1], G ∈ [0, 1], B ∈ [0, 1]. +// +// +fn hsv_to_rgb(hsv: vec3) -> vec3 { + let n = vec3(5.0, 3.0, 1.0); + let k = (n + hsv.x / FRAC_PI_3) % 6.0; + return hsv.z - hsv.z * hsv.y * max(vec3(0.0), min(k, min(4.0 - k, vec3(1.0)))); +} + +// Converts RGB to HSV. +// +// Input: R ∈ [0, 1], G ∈ [0, 1], B ∈ [0, 1]. +// Output: H ∈ [0, 2π), S ∈ [0, 1], V ∈ [0, 1]. +// +// +fn rgb_to_hsv(rgb: vec3) -> vec3 { + let x_max = max(rgb.r, max(rgb.g, rgb.b)); // i.e. V + let x_min = min(rgb.r, min(rgb.g, rgb.b)); + let c = x_max - x_min; // chroma + + var swizzle = vec3(0.0); + if (x_max == rgb.r) { + swizzle = vec3(rgb.gb, 0.0); + } else if (x_max == rgb.g) { + swizzle = vec3(rgb.br, 2.0); + } else { + swizzle = vec3(rgb.rg, 4.0); } - workgroupBarrier(); - // Each thread writes one triangle of the meshlet to the buffer slice reserved for the meshlet - if triangle_id < meshlet.triangle_count { - draw_index_buffer[draw_index_buffer_start_workgroup + triangle_id] = (cluster_id << 8u) | triangle_id; + let h = FRAC_PI_3 * (((swizzle.x - swizzle.y) / c + swizzle.z) % 6.0); + + // Avoid division by zero. + var s = 0.0; + if (x_max > 0.0) { + s = c / x_max; } + + return vec3(h, s, x_max); } + ``` -### crates/bevy_pbr/src/meshlet/meshlet_bindings +### bevy_shaders/instancing ```rust -#define_import_path bevy_pbr::meshlet_bindings +#import bevy_pbr::mesh_functions::{get_world_from_local, mesh_position_local_to_clip} -#import bevy_pbr::mesh_types::Mesh -#import bevy_render::view::View +struct Vertex { + @location(0) position: vec3, + @location(1) normal: vec3, + @location(2) uv: vec2, -struct PackedMeshletVertex { - a: vec4, - b: vec4, - tangent: vec4, -} + @location(3) i_pos_scale: vec4, + @location(4) i_color: vec4, +}; -// TODO: Octahedral encode normal, remove tangent and derive from UV derivatives -struct MeshletVertex { - position: vec3, - normal: vec3, - uv: vec2, - tangent: vec4, -} +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, +}; -fn unpack_meshlet_vertex(packed: PackedMeshletVertex) -> MeshletVertex { - var vertex: MeshletVertex; - vertex.position = packed.a.xyz; - vertex.normal = vec3(packed.a.w, packed.b.xy); - vertex.uv = packed.b.zw; - vertex.tangent = packed.tangent; - return vertex; +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + let position = vertex.position * vertex.i_pos_scale.w + vertex.i_pos_scale.xyz; + var out: VertexOutput; + // NOTE: Passing 0 as the instance_index to get_world_from_local() is a hack + // for this example as the instance_index builtin would map to the wrong + // index in the Mesh array. This index could be passed in via another + // uniform instead but it's unnecessary for the example. 
+ out.clip_position = mesh_position_local_to_clip( + get_world_from_local(0u), + vec4(position, 1.0) + ); + out.color = vertex.i_color; + return out; } -struct Meshlet { - start_vertex_id: u32, - start_index_id: u32, - triangle_count: u32, +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + return in.color; } -struct MeshletBoundingSphere { - center: vec3, - radius: f32, -} +``` -struct DrawIndirectArgs { - vertex_count: atomic, - instance_count: u32, - first_vertex: u32, - first_instance: u32, -} +### bevy_shaders/mesh_view_bindings -#ifdef MESHLET_CULLING_PASS -@group(0) @binding(0) var meshlet_thread_meshlet_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(1) var meshlet_bounding_spheres: array; // Per asset meshlet -@group(0) @binding(2) var meshlet_thread_instance_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(3) var meshlet_instance_uniforms: array; // Per entity instance -@group(0) @binding(4) var meshlet_view_instance_visibility: array; // 1 bit per entity instance, packed as a bitmask -@group(0) @binding(5) var meshlet_occlusion: array>; // 1 bit per cluster (instance of a meshlet), packed as a bitmask -@group(0) @binding(6) var meshlet_previous_cluster_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(7) var meshlet_previous_occlusion: array; // 1 bit per cluster (instance of a meshlet), packed as a bitmask -@group(0) @binding(8) var view: View; -@group(0) @binding(9) var depth_pyramid: texture_2d; // Generated from the first raster pass (unused in the first pass but still bound) +```rust +#define_import_path bevy_pbr::mesh_view_bindings -fn should_cull_instance(instance_id: u32) -> bool { - let bit_offset = instance_id % 32u; - let packed_visibility = meshlet_view_instance_visibility[instance_id / 32u]; - return bool(extractBits(packed_visibility, bit_offset, 1u)); +#import bevy_pbr::mesh_view_types as types +#import bevy_render::{ + view::View, + globals::Globals, } -fn get_meshlet_previous_occlusion(cluster_id: u32) -> bool { - let previous_cluster_id = meshlet_previous_cluster_ids[cluster_id]; - let packed_occlusion = meshlet_previous_occlusion[previous_cluster_id / 32u]; - let bit_offset = previous_cluster_id % 32u; - return bool(extractBits(packed_occlusion, bit_offset, 1u)); -} +@group(0) @binding(0) var view: View; +@group(0) @binding(1) var lights: types::Lights; +#ifdef NO_CUBE_ARRAY_TEXTURES_SUPPORT +@group(0) @binding(2) var point_shadow_textures: texture_depth_cube; +#else +@group(0) @binding(2) var point_shadow_textures: texture_depth_cube_array; #endif +@group(0) @binding(3) var point_shadow_textures_sampler: sampler_comparison; +#ifdef NO_ARRAY_TEXTURES_SUPPORT +@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d; +#else +@group(0) @binding(4) var directional_shadow_textures: texture_depth_2d_array; +#endif +@group(0) @binding(5) var directional_shadow_textures_sampler: sampler_comparison; -#ifdef MESHLET_WRITE_INDEX_BUFFER_PASS -@group(0) @binding(0) var meshlet_occlusion: array; // 1 bit per cluster (instance of a meshlet), packed as a bitmask -@group(0) @binding(1) var meshlet_thread_meshlet_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(2) var meshlet_previous_cluster_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(3) var meshlet_previous_occlusion: array; // 1 bit per cluster (instance of a meshlet), packed as a bitmask -@group(0) @binding(4) var meshlets: array; // Per asset meshlet -@group(0) @binding(5) var 
draw_indirect_args: DrawIndirectArgs; // Single object shared between all workgroups/meshlets/triangles -@group(0) @binding(6) var draw_index_buffer: array; // Single object shared between all workgroups/meshlets/triangles +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 +@group(0) @binding(6) var clusterable_objects: types::ClusterableObjects; +@group(0) @binding(7) var clusterable_object_index_lists: types::ClusterLightIndexLists; +@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; +#else +@group(0) @binding(6) var clusterable_objects: types::ClusterableObjects; +@group(0) @binding(7) var clusterable_object_index_lists: types::ClusterLightIndexLists; +@group(0) @binding(8) var cluster_offsets_and_counts: types::ClusterOffsetsAndCounts; +#endif -fn get_meshlet_occlusion(cluster_id: u32) -> bool { - let packed_occlusion = meshlet_occlusion[cluster_id / 32u]; - let bit_offset = cluster_id % 32u; - return bool(extractBits(packed_occlusion, bit_offset, 1u)); -} +@group(0) @binding(9) var globals: Globals; +@group(0) @binding(10) var fog: types::Fog; +@group(0) @binding(11) var light_probes: types::LightProbes; -fn get_meshlet_previous_occlusion(cluster_id: u32) -> bool { - let previous_cluster_id = meshlet_previous_cluster_ids[cluster_id]; - let packed_occlusion = meshlet_previous_occlusion[previous_cluster_id / 32u]; - let bit_offset = previous_cluster_id % 32u; - return bool(extractBits(packed_occlusion, bit_offset, 1u)); -} +const VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE: u32 = 64u; +#if AVAILABLE_STORAGE_BUFFER_BINDINGS >= 6 +@group(0) @binding(12) var visibility_ranges: array>; +#else +@group(0) @binding(12) var visibility_ranges: array, VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE>; #endif -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS -@group(0) @binding(0) var meshlet_thread_meshlet_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(1) var meshlets: array; // Per asset meshlet -@group(0) @binding(2) var meshlet_indices: array; // Many per asset meshlet -@group(0) @binding(3) var meshlet_vertex_ids: array; // Many per asset meshlet -@group(0) @binding(4) var meshlet_vertex_data: array; // Many per asset meshlet -@group(0) @binding(5) var meshlet_thread_instance_ids: array; // Per cluster (instance of a meshlet) -@group(0) @binding(6) var meshlet_instance_uniforms: array; // Per entity instance -@group(0) @binding(7) var meshlet_instance_material_ids: array; // Per entity instance -@group(0) @binding(8) var draw_index_buffer: array; // Single object shared between all workgroups/meshlets/triangles -@group(0) @binding(9) var view: View; +@group(0) @binding(13) var ssr_settings: types::ScreenSpaceReflectionsSettings; +@group(0) @binding(14) var screen_space_ambient_occlusion_texture: texture_2d; -fn get_meshlet_index(index_id: u32) -> u32 { - let packed_index = meshlet_indices[index_id / 4u]; - let bit_offset = (index_id % 4u) * 8u; - return extractBits(packed_index, bit_offset, 8u); -} +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY +@group(0) @binding(15) var diffuse_environment_maps: binding_array, 8u>; +@group(0) @binding(16) var specular_environment_maps: binding_array, 8u>; +#else +@group(0) @binding(15) var diffuse_environment_map: texture_cube; +@group(0) @binding(16) var specular_environment_map: texture_cube; #endif +@group(0) @binding(17) var environment_map_sampler: sampler; -#ifdef MESHLET_MESH_MATERIAL_PASS -@group(1) @binding(0) var meshlet_visibility_buffer: texture_2d; // Generated from the meshlet raster passes -@group(1) @binding(1) var 
meshlet_thread_meshlet_ids: array; // Per cluster (instance of a meshlet) -@group(1) @binding(2) var meshlets: array; // Per asset meshlet -@group(1) @binding(3) var meshlet_indices: array; // Many per asset meshlet -@group(1) @binding(4) var meshlet_vertex_ids: array; // Many per asset meshlet -@group(1) @binding(5) var meshlet_vertex_data: array; // Many per asset meshlet -@group(1) @binding(6) var meshlet_thread_instance_ids: array; // Per cluster (instance of a meshlet) -@group(1) @binding(7) var meshlet_instance_uniforms: array; // Per entity instance - -fn get_meshlet_index(index_id: u32) -> u32 { - let packed_index = meshlet_indices[index_id / 4u]; - let bit_offset = (index_id % 4u) * 8u; - return extractBits(packed_index, bit_offset, 8u); -} +#ifdef IRRADIANCE_VOLUMES_ARE_USABLE +#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY +@group(0) @binding(18) var irradiance_volumes: binding_array, 8u>; +#else +@group(0) @binding(18) var irradiance_volume: texture_3d; +#endif +@group(0) @binding(19) var irradiance_volume_sampler: sampler; #endif -``` +@group(0) @binding(20) var dt_lut_texture: texture_3d; +@group(0) @binding(21) var dt_lut_sampler: sampler; + +#ifdef MULTISAMPLED +#ifdef DEPTH_PREPASS +@group(0) @binding(22) var depth_prepass_texture: texture_depth_multisampled_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS +@group(0) @binding(23) var normal_prepass_texture: texture_multisampled_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(24) var motion_vector_prepass_texture: texture_multisampled_2d; +#endif // MOTION_VECTOR_PREPASS -### crates/bevy_pbr/src/meshlet/downsample_depth +#else // MULTISAMPLED -```rust -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#ifdef DEPTH_PREPASS +@group(0) @binding(22) var depth_prepass_texture: texture_depth_2d; +#endif // DEPTH_PREPASS +#ifdef NORMAL_PREPASS +@group(0) @binding(23) var normal_prepass_texture: texture_2d; +#endif // NORMAL_PREPASS +#ifdef MOTION_VECTOR_PREPASS +@group(0) @binding(24) var motion_vector_prepass_texture: texture_2d; +#endif // MOTION_VECTOR_PREPASS -@group(0) @binding(0) var input_depth: texture_2d; -@group(0) @binding(1) var samplr: sampler; +#endif // MULTISAMPLED -/// Performs a 2x2 downsample on a depth texture to generate the next mip level of a hierarchical depth buffer. +#ifdef DEFERRED_PREPASS +@group(0) @binding(25) var deferred_prepass_texture: texture_2d; +#endif // DEFERRED_PREPASS -@fragment -fn downsample_depth(in: FullscreenVertexOutput) -> @location(0) vec4 { - let depth_quad = textureGather(0, input_depth, samplr, in.uv); - let downsampled_depth = min( - min(depth_quad.x, depth_quad.y), - min(depth_quad.z, depth_quad.w), - ); - return vec4(downsampled_depth, 0.0, 0.0, 0.0); -} +@group(0) @binding(26) var view_transmission_texture: texture_2d; +@group(0) @binding(27) var view_transmission_sampler: sampler; ``` -### crates/bevy_pbr/src/meshlet/cull_meshlets +### bevy_shaders/preprocess_depth ```rust -#import bevy_pbr::meshlet_bindings::{ - meshlet_thread_meshlet_ids, - meshlet_bounding_spheres, - meshlet_thread_instance_ids, - meshlet_instance_uniforms, - meshlet_occlusion, - view, - should_cull_instance, - get_meshlet_previous_occlusion, +// Inputs a depth texture and outputs a MIP-chain of depths. +// +// Because SSAO's performance is bound by texture reads, this increases +// performance over using the full resolution depth for every sample. 
+ +// Reference: https://research.nvidia.com/sites/default/files/pubs/2012-06_Scalable-Ambient-Obscurance/McGuire12SAO.pdf, section 2.2 + +#import bevy_render::view::View + +@group(0) @binding(0) var input_depth: texture_depth_2d; +@group(0) @binding(1) var preprocessed_depth_mip0: texture_storage_2d; +@group(0) @binding(2) var preprocessed_depth_mip1: texture_storage_2d; +@group(0) @binding(3) var preprocessed_depth_mip2: texture_storage_2d; +@group(0) @binding(4) var preprocessed_depth_mip3: texture_storage_2d; +@group(0) @binding(5) var preprocessed_depth_mip4: texture_storage_2d; +@group(1) @binding(0) var point_clamp_sampler: sampler; +@group(1) @binding(1) var view: View; + + +// Using 4 depths from the previous MIP, compute a weighted average for the depth of the current MIP +fn weighted_average(depth0: f32, depth1: f32, depth2: f32, depth3: f32) -> f32 { + let depth_range_scale_factor = 0.75; + let effect_radius = depth_range_scale_factor * 0.5 * 1.457; + let falloff_range = 0.615 * effect_radius; + let falloff_from = effect_radius * (1.0 - 0.615); + let falloff_mul = -1.0 / falloff_range; + let falloff_add = falloff_from / falloff_range + 1.0; + + let min_depth = min(min(depth0, depth1), min(depth2, depth3)); + let weight0 = saturate((depth0 - min_depth) * falloff_mul + falloff_add); + let weight1 = saturate((depth1 - min_depth) * falloff_mul + falloff_add); + let weight2 = saturate((depth2 - min_depth) * falloff_mul + falloff_add); + let weight3 = saturate((depth3 - min_depth) * falloff_mul + falloff_add); + let weight_total = weight0 + weight1 + weight2 + weight3; + + return ((weight0 * depth0) + (weight1 * depth1) + (weight2 * depth2) + (weight3 * depth3)) / weight_total; } -#ifdef MESHLET_SECOND_CULLING_PASS -#import bevy_pbr::meshlet_bindings::depth_pyramid -#endif -#import bevy_render::maths::affine3_to_square -/// Culls individual clusters (1 per thread) in two passes (two pass occlusion culling), and outputs a bitmask of which clusters survived. -/// 1. The first pass is only frustum culling, on only the clusters that were visible last frame. -/// 2. The second pass performs both frustum and occlusion culling (using the depth buffer generated from the first pass), on all clusters. 
+// Used to share the depths from the previous MIP level between all invocations in a workgroup +var previous_mip_depth: array, 8>; @compute -@workgroup_size(128, 1, 1) // 128 threads per workgroup, 1 instanced meshlet per thread -fn cull_meshlets(@builtin(global_invocation_id) cluster_id: vec3) { - // Fetch the instanced meshlet data - if cluster_id.x >= arrayLength(&meshlet_thread_meshlet_ids) { return; } - let instance_id = meshlet_thread_instance_ids[cluster_id.x]; - if should_cull_instance(instance_id) { - return; - } - let meshlet_id = meshlet_thread_meshlet_ids[cluster_id.x]; - let bounding_sphere = meshlet_bounding_spheres[meshlet_id]; - let instance_uniform = meshlet_instance_uniforms[instance_id]; - let model = affine3_to_square(instance_uniform.model); - let model_scale = max(length(model[0]), max(length(model[1]), length(model[2]))); - let bounding_sphere_center = model * vec4(bounding_sphere.center, 1.0); - let bounding_sphere_radius = model_scale * bounding_sphere.radius; +@workgroup_size(8, 8, 1) +fn preprocess_depth(@builtin(global_invocation_id) global_id: vec3, @builtin(local_invocation_id) local_id: vec3) { + let base_coordinates = vec2(global_id.xy); + + // MIP 0 - Copy 4 texels from the input depth (per invocation, 8x8 invocations per workgroup) + let pixel_coordinates0 = base_coordinates * 2i; + let pixel_coordinates1 = pixel_coordinates0 + vec2(1i, 0i); + let pixel_coordinates2 = pixel_coordinates0 + vec2(0i, 1i); + let pixel_coordinates3 = pixel_coordinates0 + vec2(1i, 1i); + let depths_uv = vec2(pixel_coordinates0) / view.viewport.zw; + let depths = textureGather(0, input_depth, point_clamp_sampler, depths_uv, vec2(1i, 1i)); + textureStore(preprocessed_depth_mip0, pixel_coordinates0, vec4(depths.w, 0.0, 0.0, 0.0)); + textureStore(preprocessed_depth_mip0, pixel_coordinates1, vec4(depths.z, 0.0, 0.0, 0.0)); + textureStore(preprocessed_depth_mip0, pixel_coordinates2, vec4(depths.x, 0.0, 0.0, 0.0)); + textureStore(preprocessed_depth_mip0, pixel_coordinates3, vec4(depths.y, 0.0, 0.0, 0.0)); + + // MIP 1 - Weighted average of MIP 0's depth values (per invocation, 8x8 invocations per workgroup) + let depth_mip1 = weighted_average(depths.w, depths.z, depths.x, depths.y); + textureStore(preprocessed_depth_mip1, base_coordinates, vec4(depth_mip1, 0.0, 0.0, 0.0)); + previous_mip_depth[local_id.x][local_id.y] = depth_mip1; + + workgroupBarrier(); + + // MIP 2 - Weighted average of MIP 1's depth values (per invocation, 4x4 invocations per workgroup) + if all(local_id.xy % vec2(2u) == vec2(0u)) { + let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; + let depth1 = previous_mip_depth[local_id.x + 1u][local_id.y + 0u]; + let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 1u]; + let depth3 = previous_mip_depth[local_id.x + 1u][local_id.y + 1u]; + let depth_mip2 = weighted_average(depth0, depth1, depth2, depth3); + textureStore(preprocessed_depth_mip2, base_coordinates / 2i, vec4(depth_mip2, 0.0, 0.0, 0.0)); + previous_mip_depth[local_id.x][local_id.y] = depth_mip2; + } - // In the first pass, operate only on the clusters visible last frame. In the second pass, operate on all clusters. 
-#ifdef MESHLET_SECOND_CULLING_PASS - var meshlet_visible = true; -#else - var meshlet_visible = get_meshlet_previous_occlusion(cluster_id.x); - if !meshlet_visible { return; } -#endif + workgroupBarrier(); - // Frustum culling - // TODO: Faster method from https://vkguide.dev/docs/gpudriven/compute_culling/#frustum-culling-function - for (var i = 0u; i < 6u; i++) { - if !meshlet_visible { break; } - meshlet_visible &= dot(view.frustum[i], bounding_sphere_center) > -bounding_sphere_radius; + // MIP 3 - Weighted average of MIP 2's depth values (per invocation, 2x2 invocations per workgroup) + if all(local_id.xy % vec2(4u) == vec2(0u)) { + let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; + let depth1 = previous_mip_depth[local_id.x + 2u][local_id.y + 0u]; + let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 2u]; + let depth3 = previous_mip_depth[local_id.x + 2u][local_id.y + 2u]; + let depth_mip3 = weighted_average(depth0, depth1, depth2, depth3); + textureStore(preprocessed_depth_mip3, base_coordinates / 4i, vec4(depth_mip3, 0.0, 0.0, 0.0)); + previous_mip_depth[local_id.x][local_id.y] = depth_mip3; } -#ifdef MESHLET_SECOND_CULLING_PASS - // In the second culling pass, cull against the depth pyramid generated from the first pass - if meshlet_visible { - let bounding_sphere_center_view_space = (view.inverse_view * vec4(bounding_sphere_center.xyz, 1.0)).xyz; - let aabb = project_view_space_sphere_to_screen_space_aabb(bounding_sphere_center_view_space, bounding_sphere_radius); + workgroupBarrier(); - // Halve the AABB size because the first depth mip resampling pass cut the full screen resolution into a power of two conservatively - let depth_pyramid_size_mip_0 = vec2(textureDimensions(depth_pyramid, 0)) * 0.5; - let width = (aabb.z - aabb.x) * depth_pyramid_size_mip_0.x; - let height = (aabb.w - aabb.y) * depth_pyramid_size_mip_0.y; - let depth_level = max(0, i32(ceil(log2(max(width, height))))); // TODO: Naga doesn't like this being a u32 - let depth_pyramid_size = vec2(textureDimensions(depth_pyramid, depth_level)); - let aabb_top_left = vec2(aabb.xy * depth_pyramid_size); + // MIP 4 - Weighted average of MIP 3's depth values (per invocation, 1 invocation per workgroup) + if all(local_id.xy % vec2(8u) == vec2(0u)) { + let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; + let depth1 = previous_mip_depth[local_id.x + 4u][local_id.y + 0u]; + let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 4u]; + let depth3 = previous_mip_depth[local_id.x + 4u][local_id.y + 4u]; + let depth_mip4 = weighted_average(depth0, depth1, depth2, depth3); + textureStore(preprocessed_depth_mip4, base_coordinates / 8i, vec4(depth_mip4, 0.0, 0.0, 0.0)); + } +} - let depth_quad_a = textureLoad(depth_pyramid, aabb_top_left, depth_level).x; - let depth_quad_b = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 0u), depth_level).x; - let depth_quad_c = textureLoad(depth_pyramid, aabb_top_left + vec2(0u, 1u), depth_level).x; - let depth_quad_d = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 1u), depth_level).x; +``` - let occluder_depth = min(min(depth_quad_a, depth_quad_b), min(depth_quad_c, depth_quad_d)); - if view.projection[3][3] == 1.0 { - // Orthographic - let sphere_depth = view.projection[3][2] + (bounding_sphere_center_view_space.z + bounding_sphere_radius) * view.projection[2][2]; - meshlet_visible &= sphere_depth >= occluder_depth; - } else { - // Perspective - let sphere_depth = -view.projection[3][2] / (bounding_sphere_center_view_space.z + 
bounding_sphere_radius); - meshlet_visible &= sphere_depth >= occluder_depth; - } - } -#endif +### bevy_shaders/tonemapping_test_patterns - // Write the bitmask of whether or not the cluster was culled - let occlusion_bit = u32(meshlet_visible) << (cluster_id.x % 32u); - atomicOr(&meshlet_occlusion[cluster_id.x / 32u], occlusion_bit); +```rust +#import bevy_pbr::{ + mesh_view_bindings, + forward_io::VertexOutput, } -// https://zeux.io/2023/01/12/approximate-projected-bounds -fn project_view_space_sphere_to_screen_space_aabb(cp: vec3, r: f32) -> vec4 { - let inv_width = view.projection[0][0] * 0.5; - let inv_height = view.projection[1][1] * 0.5; - if view.projection[3][3] == 1.0 { - // Orthographic - let min_x = cp.x - r; - let max_x = cp.x + r; +#import bevy_render::maths::PI - let min_y = cp.y - r; - let max_y = cp.y + r; +#ifdef TONEMAP_IN_SHADER +#import bevy_core_pipeline::tonemapping::tone_mapping +#endif - return vec4(min_x * inv_width, 1.0 - max_y * inv_height, max_x * inv_width, 1.0 - min_y * inv_height); +// Sweep across hues on y axis with value from 0.0 to +15EV across x axis +// quantized into 24 steps for both axis. +fn color_sweep(uv_input: vec2) -> vec3 { + var uv = uv_input; + let steps = 24.0; + uv.y = uv.y * (1.0 + 1.0 / steps); + let ratio = 2.0; + + let h = PI * 2.0 * floor(1.0 + steps * uv.y) / steps; + let L = floor(uv.x * steps * ratio) / (steps * ratio) - 0.5; + + var color = vec3(0.0); + if uv.y < 1.0 { + color = cos(h + vec3(0.0, 1.0, 2.0) * PI * 2.0 / 3.0); + let maxRGB = max(color.r, max(color.g, color.b)); + let minRGB = min(color.r, min(color.g, color.b)); + color = exp(15.0 * L) * (color - minRGB) / (maxRGB - minRGB); } else { - // Perspective - let c = vec3(cp.xy, -cp.z); - let cr = c * r; - let czr2 = c.z * c.z - r * r; + color = vec3(exp(15.0 * L)); + } + return color; +} - let vx = sqrt(c.x * c.x + czr2); - let min_x = (vx * c.x - cr.z) / (vx * c.z + cr.x); - let max_x = (vx * c.x + cr.z) / (vx * c.z - cr.x); +fn hsv_to_srgb(c: vec3) -> vec3 { + let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + let p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} - let vy = sqrt(c.y * c.y + czr2); - let min_y = (vy * c.y - cr.z) / (vy * c.z + cr.y); - let max_y = (vy * c.y + cr.z) / (vy * c.z - cr.y); +// Generates a continuous sRGB sweep. 
+fn continuous_hue(uv: vec2) -> vec3 { + return hsv_to_srgb(vec3(uv.x, 1.0, 1.0)) * max(0.0, exp2(uv.y * 9.0) - 1.0); +} - return vec4(min_x * inv_width, -max_y * inv_height, max_x * inv_width, -min_y * inv_height) + vec4(0.5); +@fragment +fn fragment( + in: VertexOutput, +) -> @location(0) vec4 { + var uv = in.uv; + var out = vec3(0.0); + if uv.y > 0.5 { + uv.y = 1.0 - uv.y; + out = color_sweep(vec2(uv.x, uv.y * 2.0)); + } else { + out = continuous_hue(vec2(uv.y * 2.0, uv.x)); } + var color = vec4(out, 1.0); +#ifdef TONEMAP_IN_SHADER + color = tone_mapping(color, mesh_view_bindings::view.color_grading); +#endif + return color; } ``` -### crates/bevy_pbr/src/meshlet/meshlet_mesh_material +### bevy_shaders/custom_gltf_2d ```rust -#import bevy_pbr::{ - meshlet_visibility_buffer_resolve::resolve_vertex_output, - view_transformations::uv_to_ndc, - prepass_io, - pbr_prepass_functions, - utils::rand_f, +#import bevy_sprite::{ + mesh2d_view_bindings::globals, + mesh2d_functions::{get_world_from_local, mesh2d_position_local_to_clip}, } +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, + @location(1) color: vec4, + @location(2) barycentric: vec3, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) color: vec4, + @location(1) barycentric: vec3, +}; + @vertex -fn vertex(@builtin(vertex_index) vertex_input: u32) -> @builtin(position) vec4 { - let vertex_index = vertex_input % 3u; - let material_id = vertex_input / 3u; - let material_depth = f32(material_id) / 65535.0; - let uv = vec2(vec2(vertex_index >> 1u, vertex_index & 1u)) * 2.0; - return vec4(uv_to_ndc(uv), material_depth, 1.0); +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + let world_from_local = get_world_from_local(vertex.instance_index); + out.clip_position = mesh2d_position_local_to_clip(world_from_local, vec4(vertex.position, 1.0)); + out.color = vertex.color; + out.barycentric = vertex.barycentric; + return out; } +struct FragmentInput { + @location(0) color: vec4, + @location(1) barycentric: vec3, +}; + @fragment -fn fragment(@builtin(position) frag_coord: vec4) -> @location(0) vec4 { - let vertex_output = resolve_vertex_output(frag_coord); - var rng = vertex_output.meshlet_id; - let color = vec3(rand_f(&rng), rand_f(&rng), rand_f(&rng)); - return vec4(color, 1.0); +fn fragment(input: FragmentInput) -> @location(0) vec4 { + let d = min(input.barycentric.x, min(input.barycentric.y, input.barycentric.z)); + let t = 0.05 * (0.85 + sin(5.0 * globals.time)); + return mix(vec4(1.0,1.0,1.0,1.0), input.color, smoothstep(t, t+0.01, d)); } -#ifdef PREPASS_FRAGMENT -@fragment -fn prepass_fragment(@builtin(position) frag_coord: vec4) -> prepass_io::FragmentOutput { - let vertex_output = resolve_vertex_output(frag_coord); +``` - var out: prepass_io::FragmentOutput; +### bevy_shaders/light_probe -#ifdef NORMAL_PREPASS - out.normal = vec4(vertex_output.world_normal * 0.5 + vec3(0.5), 1.0); -#endif +```rust +#define_import_path bevy_pbr::light_probe -#ifdef MOTION_VECTOR_PREPASS - out.motion_vector = vertex_output.motion_vector; -#endif +#import bevy_pbr::mesh_view_bindings::light_probes +#import bevy_pbr::mesh_view_types::LightProbe + +// The result of searching for a light probe. +struct LightProbeQueryResult { + // The index of the light probe texture or textures in the binding array or + // arrays. + texture_index: i32, + // A scale factor that's applied to the diffuse and specular light from the + // light probe. 
This is in units of cd/m² (candela per square meter). + intensity: f32, + // Transform from world space to the light probe model space. In light probe + // model space, the light probe is a 1×1×1 cube centered on the origin. + light_from_world: mat4x4, +}; + +fn transpose_affine_matrix(matrix: mat3x4) -> mat4x4 { + let matrix4x4 = mat4x4( + matrix[0], + matrix[1], + matrix[2], + vec4(0.0, 0.0, 0.0, 1.0)); + return transpose(matrix4x4); +} + +// Searches for a light probe that contains the fragment. +// +// TODO: Interpolate between multiple light probes. +fn query_light_probe( + world_position: vec3, + is_irradiance_volume: bool, +) -> LightProbeQueryResult { + var result: LightProbeQueryResult; + result.texture_index = -1; + + var light_probe_count: i32; + if is_irradiance_volume { + light_probe_count = light_probes.irradiance_volume_count; + } else { + light_probe_count = light_probes.reflection_probe_count; + } + + for (var light_probe_index: i32 = 0; + light_probe_index < light_probe_count && result.texture_index < 0; + light_probe_index += 1) { + var light_probe: LightProbe; + if is_irradiance_volume { + light_probe = light_probes.irradiance_volumes[light_probe_index]; + } else { + light_probe = light_probes.reflection_probes[light_probe_index]; + } + + // Unpack the inverse transform. + let light_from_world = + transpose_affine_matrix(light_probe.light_from_world_transposed); + + // Check to see if the transformed point is inside the unit cube + // centered at the origin. + let probe_space_pos = (light_from_world * vec4(world_position, 1.0f)).xyz; + if (all(abs(probe_space_pos) <= vec3(0.5f))) { + result.texture_index = light_probe.cubemap_index; + result.intensity = light_probe.intensity; + result.light_from_world = light_from_world; -#ifdef DEFERRED_PREPASS - // There isn't any material info available for this default prepass shader so we are just writing  - // emissive magenta out to the deferred gbuffer to be rendered by the first deferred lighting pass layer. - // This is here so if the default prepass fragment is used for deferred magenta will be rendered, and also - // as an example to show that a user could write to the deferred gbuffer if they were to start from this shader. - out.deferred = vec4(0u, bevy_pbr::rgb9e5::vec3_to_rgb9e5_(vec3(1.0, 0.0, 1.0)), 0u, 0u); - out.deferred_lighting_pass_id = 1u; -#endif + // TODO: Workaround for ICE in DXC https://github.com/microsoft/DirectXShaderCompiler/issues/6183 + // We can't use `break` here because of the ICE. + // So instead we rely on the fact that we set `result.texture_index` + // above and check its value in the `for` loop header before + // looping. 
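+            // Once a probe is found, the header condition above fails on the
+            // next iteration, so the loop body does not run again.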
+ // break; + } + } - return out; + return result; } -#endif + ``` -### crates/bevy_pbr/src/meshlet/visibility_buffer_resolve +### bevy_shaders/pbr_deferred_functions ```rust -#define_import_path bevy_pbr::meshlet_visibility_buffer_resolve +#define_import_path bevy_pbr::pbr_deferred_functions #import bevy_pbr::{ - meshlet_bindings::{ - meshlet_visibility_buffer, - meshlet_thread_meshlet_ids, - meshlets, - meshlet_vertex_ids, - meshlet_vertex_data, - meshlet_thread_instance_ids, - meshlet_instance_uniforms, - get_meshlet_index, - unpack_meshlet_vertex, - }, + pbr_types::{PbrInput, pbr_input_new, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, + pbr_deferred_types as deferred_types, + pbr_functions, + rgb9e5, mesh_view_bindings::view, - mesh_functions::mesh_position_local_to_world, - mesh_types::MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT, - view_transformations::{position_world_to_clip, frag_coord_to_ndc}, + utils::{octahedral_encode, octahedral_decode}, + prepass_io::FragmentOutput, + view_transformations::{position_ndc_to_world, frag_coord_to_ndc}, } -#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} -#ifdef PREPASS_FRAGMENT -#ifdef MOTION_VECTOR_PREPASS -#import bevy_pbr::{ - prepass_bindings::previous_view_uniforms, - pbr_prepass_functions::calculate_motion_vector, -} -#endif +#ifdef MESHLET_MESH_MATERIAL_PASS +#import bevy_pbr::meshlet_visibility_buffer_resolve::VertexOutput +#else +#import bevy_pbr::prepass_io::VertexOutput #endif -/// Functions to be used by materials for reading from a meshlet visibility buffer texture. +#ifdef MOTION_VECTOR_PREPASS + #import bevy_pbr::pbr_prepass_functions::calculate_motion_vector +#endif -#ifdef MESHLET_MESH_MATERIAL_PASS -struct PartialDerivatives { - barycentrics: vec3, - ddx: vec3, - ddy: vec3, +// Creates the deferred gbuffer from a PbrInput. +fn deferred_gbuffer_from_pbr_input(in: PbrInput) -> vec4 { + // Only monochrome occlusion supported. May not be worth including at all. + // Some models have baked occlusion, GLTF only supports monochrome. + // Real time occlusion is applied in the deferred lighting pass. + // Deriving luminance via Rec. 709. coefficients + // https://en.wikipedia.org/wiki/Rec._709 + let diffuse_occlusion = dot(in.diffuse_occlusion, vec3(0.2126, 0.7152, 0.0722)); +#ifdef WEBGL2 // More crunched for webgl so we can also fit depth. + var props = deferred_types::pack_unorm3x4_plus_unorm_20_(vec4( + in.material.reflectance, + in.material.metallic, + diffuse_occlusion, + in.frag_coord.z)); +#else + var props = deferred_types::pack_unorm4x8_(vec4( + in.material.reflectance, // could be fewer bits + in.material.metallic, // could be fewer bits + diffuse_occlusion, // is this worth including? + 0.0)); // spare +#endif // WEBGL2 + let flags = deferred_types::deferred_flags_from_mesh_material_flags(in.flags, in.material.flags); + let octahedral_normal = octahedral_encode(normalize(in.N)); + var base_color_srgb = vec3(0.0); + var emissive = in.material.emissive.rgb; + if ((in.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { + // Material is unlit, use emissive component of gbuffer for color data. + // Unlit materials are effectively emissive. 
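+        // base_color_srgb stays zeroed in this branch; the unpacking code below
+        // reads the color back out of the emissive channel for unlit materials.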
+ emissive = in.material.base_color.rgb; + } else { + base_color_srgb = pow(in.material.base_color.rgb, vec3(1.0 / 2.2)); + } + let deferred = vec4( + deferred_types::pack_unorm4x8_(vec4(base_color_srgb, in.material.perceptual_roughness)), + rgb9e5::vec3_to_rgb9e5_(emissive), + props, + deferred_types::pack_24bit_normal_and_flags(octahedral_normal, flags), + ); + return deferred; } -// https://github.com/ConfettiFX/The-Forge/blob/2d453f376ef278f66f97cbaf36c0d12e4361e275/Examples_3/Visibility_Buffer/src/Shaders/FSL/visibilityBuffer_shade.frag.fsl#L83-L139 -fn compute_partial_derivatives(vertex_clip_positions: array, 3>, ndc_uv: vec2, screen_size: vec2) -> PartialDerivatives { - var result: PartialDerivatives; - - let inv_w = 1.0 / vec3(vertex_clip_positions[0].w, vertex_clip_positions[1].w, vertex_clip_positions[2].w); - let ndc_0 = vertex_clip_positions[0].xy * inv_w[0]; - let ndc_1 = vertex_clip_positions[1].xy * inv_w[1]; - let ndc_2 = vertex_clip_positions[2].xy * inv_w[2]; - - let inv_det = 1.0 / determinant(mat2x2(ndc_2 - ndc_1, ndc_0 - ndc_1)); - result.ddx = vec3(ndc_1.y - ndc_2.y, ndc_2.y - ndc_0.y, ndc_0.y - ndc_1.y) * inv_det * inv_w; - result.ddy = vec3(ndc_2.x - ndc_1.x, ndc_0.x - ndc_2.x, ndc_1.x - ndc_0.x) * inv_det * inv_w; - - var ddx_sum = dot(result.ddx, vec3(1.0)); - var ddy_sum = dot(result.ddy, vec3(1.0)); - - let delta_v = ndc_uv - ndc_0; - let interp_inv_w = inv_w.x + delta_v.x * ddx_sum + delta_v.y * ddy_sum; - let interp_w = 1.0 / interp_inv_w; +// Creates a PbrInput from the deferred gbuffer. +fn pbr_input_from_deferred_gbuffer(frag_coord: vec4, gbuffer: vec4) -> PbrInput { + var pbr = pbr_input_new(); - result.barycentrics = vec3( - interp_w * (delta_v.x * result.ddx.x + delta_v.y * result.ddy.x + inv_w.x), - interp_w * (delta_v.x * result.ddx.y + delta_v.y * result.ddy.y), - interp_w * (delta_v.x * result.ddx.z + delta_v.y * result.ddy.z), - ); + let flags = deferred_types::unpack_flags(gbuffer.a); + let deferred_flags = deferred_types::mesh_material_flags_from_deferred_flags(flags); + pbr.flags = deferred_flags.x; + pbr.material.flags = deferred_flags.y; - result.ddx *= 2.0 / screen_size.x; - result.ddy *= 2.0 / screen_size.y; - ddx_sum *= 2.0 / screen_size.x; - ddy_sum *= 2.0 / screen_size.y; + let base_rough = deferred_types::unpack_unorm4x8_(gbuffer.r); + pbr.material.perceptual_roughness = base_rough.a; + let emissive = rgb9e5::rgb9e5_to_vec3_(gbuffer.g); + if ((pbr.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { + pbr.material.base_color = vec4(emissive, 1.0); + pbr.material.emissive = vec4(vec3(0.0), 1.0); + } else { + pbr.material.base_color = vec4(pow(base_rough.rgb, vec3(2.2)), 1.0); + pbr.material.emissive = vec4(emissive, 1.0); + } +#ifdef WEBGL2 // More crunched for webgl so we can also fit depth. + let props = deferred_types::unpack_unorm3x4_plus_unorm_20_(gbuffer.b); + // Bias to 0.5 since that's the value for almost all materials. 
+ pbr.material.reflectance = saturate(props.r - 0.03333333333); +#else + let props = deferred_types::unpack_unorm4x8_(gbuffer.b); + pbr.material.reflectance = props.r; +#endif // WEBGL2 + pbr.material.metallic = props.g; + pbr.diffuse_occlusion = vec3(props.b); + let octahedral_normal = deferred_types::unpack_24bit_normal(gbuffer.a); + let N = octahedral_decode(octahedral_normal); - let interp_ddx_w = 1.0 / (interp_inv_w + ddx_sum); - let interp_ddy_w = 1.0 / (interp_inv_w + ddy_sum); + let world_position = vec4(position_ndc_to_world(frag_coord_to_ndc(frag_coord)), 1.0); + let is_orthographic = view.clip_from_view[3].w == 1.0; + let V = pbr_functions::calculate_view(world_position, is_orthographic); - result.ddx = interp_ddx_w * (result.barycentrics * interp_inv_w + result.ddx) - result.barycentrics; - result.ddy = interp_ddy_w * (result.barycentrics * interp_inv_w + result.ddy) - result.barycentrics; - return result; -} + pbr.frag_coord = frag_coord; + pbr.world_normal = N; + pbr.world_position = world_position; + pbr.N = N; + pbr.V = V; + pbr.is_orthographic = is_orthographic; -struct VertexOutput { - position: vec4, - world_position: vec4, - world_normal: vec3, - uv: vec2, - ddx_uv: vec2, - ddy_uv: vec2, - world_tangent: vec4, - mesh_flags: u32, - meshlet_id: u32, -#ifdef PREPASS_FRAGMENT -#ifdef MOTION_VECTOR_PREPASS - motion_vector: vec2, -#endif -#endif + return pbr; } -/// Load the visibility buffer texture and resolve it into a VertexOutput. -fn resolve_vertex_output(frag_coord: vec4) -> VertexOutput { - let vbuffer = textureLoad(meshlet_visibility_buffer, vec2(frag_coord.xy), 0).r; - let cluster_id = vbuffer >> 8u; - let meshlet_id = meshlet_thread_meshlet_ids[cluster_id]; - let meshlet = meshlets[meshlet_id]; - let triangle_id = extractBits(vbuffer, 0u, 8u); - let index_ids = meshlet.start_index_id + vec3(triangle_id * 3u) + vec3(0u, 1u, 2u); - let indices = meshlet.start_vertex_id + vec3(get_meshlet_index(index_ids.x), get_meshlet_index(index_ids.y), get_meshlet_index(index_ids.z)); - let vertex_ids = vec3(meshlet_vertex_ids[indices.x], meshlet_vertex_ids[indices.y], meshlet_vertex_ids[indices.z]); - let vertex_1 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.x]); - let vertex_2 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.y]); - let vertex_3 = unpack_meshlet_vertex(meshlet_vertex_data[vertex_ids.z]); - - let instance_id = meshlet_thread_instance_ids[cluster_id]; - let instance_uniform = meshlet_instance_uniforms[instance_id]; - let model = affine3_to_square(instance_uniform.model); - - let world_position_1 = mesh_position_local_to_world(model, vec4(vertex_1.position, 1.0)); - let world_position_2 = mesh_position_local_to_world(model, vec4(vertex_2.position, 1.0)); - let world_position_3 = mesh_position_local_to_world(model, vec4(vertex_3.position, 1.0)); - let clip_position_1 = position_world_to_clip(world_position_1.xyz); - let clip_position_2 = position_world_to_clip(world_position_2.xyz); - let clip_position_3 = position_world_to_clip(world_position_3.xyz); - let frag_coord_ndc = frag_coord_to_ndc(frag_coord).xy; - let partial_derivatives = compute_partial_derivatives( - array(clip_position_1, clip_position_2, clip_position_3), - frag_coord_ndc, - view.viewport.zw, - ); - - let world_position = mat3x4(world_position_1, world_position_2, world_position_3) * partial_derivatives.barycentrics; - let vertex_normal = mat3x3(vertex_1.normal, vertex_2.normal, vertex_3.normal) * partial_derivatives.barycentrics; - let world_normal = normalize( - 
mat2x4_f32_to_mat3x3_unpack( - instance_uniform.inverse_transpose_model_a, - instance_uniform.inverse_transpose_model_b, - ) * vertex_normal - ); - let uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.barycentrics; - let ddx_uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.ddx; - let ddy_uv = mat3x2(vertex_1.uv, vertex_2.uv, vertex_3.uv) * partial_derivatives.ddy; - let vertex_tangent = mat3x4(vertex_1.tangent, vertex_2.tangent, vertex_3.tangent) * partial_derivatives.barycentrics; - let world_tangent = vec4( - normalize( - mat3x3( - model[0].xyz, - model[1].xyz, - model[2].xyz - ) * vertex_tangent.xyz - ), - vertex_tangent.w * (f32(bool(instance_uniform.flags & MESH_FLAGS_SIGN_DETERMINANT_MODEL_3X3_BIT)) * 2.0 - 1.0) - ); +#ifdef PREPASS_PIPELINE +fn deferred_output(in: VertexOutput, pbr_input: PbrInput) -> FragmentOutput { + var out: FragmentOutput; -#ifdef PREPASS_FRAGMENT -#ifdef MOTION_VECTOR_PREPASS - let previous_model = affine3_to_square(instance_uniform.previous_model); - let previous_world_position_1 = mesh_position_local_to_world(previous_model, vec4(vertex_1.position, 1.0)); - let previous_world_position_2 = mesh_position_local_to_world(previous_model, vec4(vertex_2.position, 1.0)); - let previous_world_position_3 = mesh_position_local_to_world(previous_model, vec4(vertex_3.position, 1.0)); - let previous_clip_position_1 = previous_view_uniforms.view_proj * vec4(previous_world_position_1.xyz, 1.0); - let previous_clip_position_2 = previous_view_uniforms.view_proj * vec4(previous_world_position_2.xyz, 1.0); - let previous_clip_position_3 = previous_view_uniforms.view_proj * vec4(previous_world_position_3.xyz, 1.0); - let previous_partial_derivatives = compute_partial_derivatives( - array(previous_clip_position_1, previous_clip_position_2, previous_clip_position_3), - frag_coord_ndc, - view.viewport.zw, - ); - let previous_world_position = mat3x4(previous_world_position_1, previous_world_position_2, previous_world_position_3) * previous_partial_derivatives.barycentrics; - let motion_vector = calculate_motion_vector(world_position, previous_world_position); -#endif + // gbuffer + out.deferred = deferred_gbuffer_from_pbr_input(pbr_input); + // lighting pass id (used to determine which lighting shader to run for the fragment) + out.deferred_lighting_pass_id = pbr_input.material.deferred_lighting_pass_id; + // normal if required +#ifdef NORMAL_PREPASS + out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); #endif - - return VertexOutput( - frag_coord, - world_position, - world_normal, - uv, - ddx_uv, - ddy_uv, - world_tangent, - instance_uniform.flags, - meshlet_id, -#ifdef PREPASS_FRAGMENT + // motion vectors if required #ifdef MOTION_VECTOR_PREPASS - motion_vector, +#ifdef MESHLET_MESH_MATERIAL_PASS + out.motion_vector = in.motion_vector; +#else + out.motion_vector = calculate_motion_vector(in.world_position, in.previous_world_position); #endif #endif - ); + + return out; } #endif ``` -### crates/bevy_pbr/src/meshlet/dummy_visibility_buffer_resolve +### bevy_shaders/pbr_prepass_functions ```rust -#define_import_path bevy_pbr::meshlet_visibility_buffer_resolve +#define_import_path bevy_pbr::pbr_prepass_functions -/// Dummy shader to prevent naga_oil from complaining about missing imports when the MeshletPlugin is not loaded, -/// as naga_oil tries to resolve imports even if they're behind an #ifdef. 
+#import bevy_pbr::{ + prepass_io::VertexOutput, + prepass_bindings::previous_view_uniforms, + mesh_view_bindings::view, + pbr_bindings, + pbr_types, +} + +// Cutoff used for the premultiplied alpha modes BLEND, ADD, and ALPHA_TO_COVERAGE. +const PREMULTIPLIED_ALPHA_CUTOFF = 0.05; + +// We can use a simplified version of alpha_discard() here since we only need to handle the alpha_cutoff +fn prepass_alpha_discard(in: VertexOutput) { + +#ifdef MAY_DISCARD + var output_color: vec4 = pbr_bindings::material.base_color; + +#ifdef VERTEX_UVS +#ifdef STANDARD_MATERIAL_BASE_COLOR_UV_B + var uv = in.uv_b; +#else // STANDARD_MATERIAL_BASE_COLOR_UV_B + var uv = in.uv; +#endif // STANDARD_MATERIAL_BASE_COLOR_UV_B + + let uv_transform = pbr_bindings::material.uv_transform; + uv = (uv_transform * vec3(uv, 1.0)).xy; + if (pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u { + output_color = output_color * textureSampleBias(pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, uv, view.mip_bias); + } +#endif // VERTEX_UVS + + let alpha_mode = pbr_bindings::material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; + if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK { + if output_color.a < pbr_bindings::material.alpha_cutoff { + discard; + } + } else if (alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_BLEND || + alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ADD || + alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE) { + if output_color.a < PREMULTIPLIED_ALPHA_CUTOFF { + discard; + } + } else if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_PREMULTIPLIED { + if all(output_color < vec4(PREMULTIPLIED_ALPHA_CUTOFF)) { + discard; + } + } + +#endif // MAY_DISCARD +} + +#ifdef MOTION_VECTOR_PREPASS +fn calculate_motion_vector(world_position: vec4, previous_world_position: vec4) -> vec2 { + let clip_position_t = view.unjittered_clip_from_world * world_position; + let clip_position = clip_position_t.xy / clip_position_t.w; + let previous_clip_position_t = previous_view_uniforms.clip_from_world * previous_world_position; + let previous_clip_position = previous_clip_position_t.xy / previous_clip_position_t.w; + // These motion vectors are used as offsets to UV positions and are stored + // in the range -1,1 to allow offsetting from the one corner to the + // diagonally-opposite corner in UV coordinates, in either direction. + // A difference between diagonally-opposite corners of clip space is in the + // range -2,2, so this needs to be scaled by 0.5. And the V direction goes + // down where clip space y goes up, so y needs to be flipped. + return (clip_position - previous_clip_position) * vec2(0.5, -0.5); +} +#endif // MOTION_VECTOR_PREPASS ``` -### crates/bevy_pbr/src/meshlet/visibility_buffer_raster +### bevy_shaders/mesh_bindings ```rust -#import bevy_pbr::{ - meshlet_bindings::{ - meshlet_thread_meshlet_ids, - meshlets, - meshlet_vertex_ids, - meshlet_vertex_data, - meshlet_thread_instance_ids, - meshlet_instance_uniforms, - meshlet_instance_material_ids, - draw_index_buffer, - view, - get_meshlet_index, - unpack_meshlet_vertex, - }, - mesh_functions::mesh_position_local_to_world, -} -#import bevy_render::maths::affine3_to_square +#define_import_path bevy_pbr::mesh_bindings -/// Vertex/fragment shader for rasterizing meshlets into a visibility buffer. 
+#import bevy_pbr::mesh_types::Mesh -struct VertexOutput { - @builtin(position) clip_position: vec4, -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - @location(0) @interpolate(flat) visibility: u32, - @location(1) @interpolate(flat) material_depth: u32, -#endif -#ifdef DEPTH_CLAMP_ORTHO - @location(0) unclamped_clip_depth: f32, -#endif +#ifdef PER_OBJECT_BUFFER_BATCH_SIZE +@group(1) @binding(0) var mesh: array; +#else +@group(1) @binding(0) var mesh: array; +#endif // PER_OBJECT_BUFFER_BATCH_SIZE + +``` + +### bevy_shaders/mesh2d_functions + +```rust +#define_import_path bevy_sprite::mesh2d_functions + +#import bevy_sprite::{ + mesh2d_view_bindings::view, + mesh2d_bindings::mesh, } +#import bevy_render::maths::{affine3_to_square, mat2x4_f32_to_mat3x3_unpack} -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -struct FragmentOutput { - @location(0) visibility: vec4, - @location(1) material_depth: vec4, +fn get_world_from_local(instance_index: u32) -> mat4x4 { + return affine3_to_square(mesh[instance_index].world_from_local); } -#endif -@vertex -fn vertex(@builtin(vertex_index) vertex_index: u32) -> VertexOutput { - let packed_ids = draw_index_buffer[vertex_index / 3u]; - let cluster_id = packed_ids >> 8u; - let triangle_id = extractBits(packed_ids, 0u, 8u); - let index_id = (triangle_id * 3u) + (vertex_index % 3u); - let meshlet_id = meshlet_thread_meshlet_ids[cluster_id]; - let meshlet = meshlets[meshlet_id]; - let index = get_meshlet_index(meshlet.start_index_id + index_id); - let vertex_id = meshlet_vertex_ids[meshlet.start_vertex_id + index]; - let vertex = unpack_meshlet_vertex(meshlet_vertex_data[vertex_id]); - let instance_id = meshlet_thread_instance_ids[cluster_id]; - let instance_uniform = meshlet_instance_uniforms[instance_id]; +fn mesh2d_position_local_to_world(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + return world_from_local * vertex_position; +} - let model = affine3_to_square(instance_uniform.model); - let world_position = mesh_position_local_to_world(model, vec4(vertex.position, 1.0)); - var clip_position = view.view_proj * vec4(world_position.xyz, 1.0); -#ifdef DEPTH_CLAMP_ORTHO - let unclamped_clip_depth = clip_position.z; - clip_position.z = min(clip_position.z, 1.0); -#endif +fn mesh2d_position_world_to_clip(world_position: vec4) -> vec4 { + return view.clip_from_world * world_position; +} - return VertexOutput( - clip_position, -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - packed_ids, - meshlet_instance_material_ids[instance_id], -#endif -#ifdef DEPTH_CLAMP_ORTHO - unclamped_clip_depth, -#endif - ); +// NOTE: The intermediate world_position assignment is important +// for precision purposes when using the 'equals' depth comparison +// function. 
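+// In practice this means transforming local -> world and then world -> clip as
+// two separate multiplies instead of pre-composing the matrices, which can
+// produce slightly different depth values.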
+fn mesh2d_position_local_to_clip(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { + let world_position = mesh2d_position_local_to_world(world_from_local, vertex_position); + return mesh2d_position_world_to_clip(world_position); } -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -@fragment -fn fragment(vertex_output: VertexOutput) -> FragmentOutput { - return FragmentOutput( - vec4(vertex_output.visibility, 0u, 0u, 0u), - vec4(vertex_output.material_depth, 0u, 0u, 0u), - ); +fn mesh2d_normal_local_to_world(vertex_normal: vec3, instance_index: u32) -> vec3 { + return mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].local_from_world_transpose_a, + mesh[instance_index].local_from_world_transpose_b, + ) * vertex_normal; } -#endif -#ifdef DEPTH_CLAMP_ORTHO -@fragment -fn fragment(vertex_output: VertexOutput) -> @builtin(frag_depth) f32 { - return vertex_output.unclamped_clip_depth; +fn mesh2d_tangent_local_to_world(world_from_local: mat4x4, vertex_tangent: vec4) -> vec4 { + return vec4( + mat3x3( + world_from_local[0].xyz, + world_from_local[1].xyz, + world_from_local[2].xyz + ) * vertex_tangent.xyz, + vertex_tangent.w + ); } -#endif ``` -### crates/bevy_pbr/src/light_probe/environment_map +### bevy_shaders/shadows ```rust -#define_import_path bevy_pbr::environment_map - -#import bevy_pbr::light_probe::query_light_probe -#import bevy_pbr::mesh_view_bindings as bindings -#import bevy_pbr::mesh_view_bindings::light_probes - -struct EnvironmentMapLight { - diffuse: vec3, - specular: vec3, -}; +#define_import_path bevy_pbr::shadows -struct EnvironmentMapRadiances { - irradiance: vec3, - radiance: vec3, +#import bevy_pbr::{ + mesh_view_types::POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE, + mesh_view_bindings as view_bindings, + shadow_sampling::{SPOT_SHADOW_TEXEL_SIZE, sample_shadow_cubemap, sample_shadow_map} } -// Define two versions of this function, one for the case in which there are -// multiple light probes and one for the case in which only the view light probe -// is present. - -#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY - -fn compute_radiances( - perceptual_roughness: f32, - N: vec3, - R: vec3, - world_position: vec3, - found_diffuse_indirect: bool, -) -> EnvironmentMapRadiances { - var radiances: EnvironmentMapRadiances; +#import bevy_render::{ + color_operations::hsv_to_rgb, + maths::PI_2 +} - // Search for a reflection probe that contains the fragment. - var query_result = query_light_probe(world_position, /*is_irradiance_volume=*/ false); +const flip_z: vec3 = vec3(1.0, 1.0, -1.0); - // If we didn't find a reflection probe, use the view environment map if applicable. - if (query_result.texture_index < 0) { - query_result.texture_index = light_probes.view_cubemap_index; - query_result.intensity = light_probes.intensity_for_view; - } +fn fetch_point_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = &view_bindings::clusterable_objects.data[light_id]; - // If there's no cubemap, bail out. 
- if (query_result.texture_index < 0) { - radiances.irradiance = vec3(0.0); - radiances.radiance = vec3(0.0); - return radiances; - } + // because the shadow maps align with the axes and the frustum planes are at 45 degrees + // we can get the worldspace depth by taking the largest absolute axis + let surface_to_light = (*light).position_radius.xyz - frag_position.xyz; + let surface_to_light_abs = abs(surface_to_light); + let distance_to_light = max(surface_to_light_abs.x, max(surface_to_light_abs.y, surface_to_light_abs.z)); - // Split-sum approximation for image based lighting: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf - let radiance_level = perceptual_roughness * f32(textureNumLevels( - bindings::specular_environment_maps[query_result.texture_index]) - 1u); + // The normal bias here is already scaled by the texel size at 1 world unit from the light. + // The texel size increases proportionally with distance from the light so multiplying by + // distance to light scales the normal bias to the texel size at the fragment distance. + let normal_offset = (*light).shadow_normal_bias * distance_to_light * surface_normal.xyz; + let depth_offset = (*light).shadow_depth_bias * normalize(surface_to_light.xyz); + let offset_position = frag_position.xyz + normal_offset + depth_offset; - if (!found_diffuse_indirect) { - radiances.irradiance = textureSampleLevel( - bindings::diffuse_environment_maps[query_result.texture_index], - bindings::environment_map_sampler, - vec3(N.xy, -N.z), - 0.0).rgb * query_result.intensity; - } + // similar largest-absolute-axis trick as above, but now with the offset fragment position + let frag_ls = offset_position.xyz - (*light).position_radius.xyz ; + let abs_position_ls = abs(frag_ls); + let major_axis_magnitude = max(abs_position_ls.x, max(abs_position_ls.y, abs_position_ls.z)); - radiances.radiance = textureSampleLevel( - bindings::specular_environment_maps[query_result.texture_index], - bindings::environment_map_sampler, - vec3(R.xy, -R.z), - radiance_level).rgb * query_result.intensity; + // NOTE: These simplifications come from multiplying: + // projection * vec4(0, 0, -major_axis_magnitude, 1.0) + // and keeping only the terms that have any impact on the depth. + // Projection-agnostic approach: + let zw = -major_axis_magnitude * (*light).light_custom_data.xy + (*light).light_custom_data.zw; + let depth = zw.x / zw.y; - return radiances; + // Do the lookup, using HW PCF and comparison. Cubemaps assume a left-handed coordinate space, + // so we have to flip the z-axis when sampling. 
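+    // flip_z is the constant vec3(1.0, 1.0, -1.0) declared at the top of this module.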
+ return sample_shadow_cubemap(frag_ls * flip_z, distance_to_light, depth, light_id); } -#else // MULTIPLE_LIGHT_PROBES_IN_ARRAY +fn fetch_spot_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = &view_bindings::clusterable_objects.data[light_id]; -fn compute_radiances( - perceptual_roughness: f32, - N: vec3, - R: vec3, - world_position: vec3, - found_diffuse_indirect: bool, -) -> EnvironmentMapRadiances { - var radiances: EnvironmentMapRadiances; + let surface_to_light = (*light).position_radius.xyz - frag_position.xyz; - if (light_probes.view_cubemap_index < 0) { - radiances.irradiance = vec3(0.0); - radiances.radiance = vec3(0.0); - return radiances; + // construct the light view matrix + var spot_dir = vec3((*light).light_custom_data.x, 0.0, (*light).light_custom_data.y); + // reconstruct spot dir from x/z and y-direction flag + spot_dir.y = sqrt(max(0.0, 1.0 - spot_dir.x * spot_dir.x - spot_dir.z * spot_dir.z)); + if (((*light).flags & POINT_LIGHT_FLAGS_SPOT_LIGHT_Y_NEGATIVE) != 0u) { + spot_dir.y = -spot_dir.y; } - // Split-sum approximation for image based lighting: https://cdn2.unrealengine.com/Resources/files/2013SiggraphPresentationsNotes-26915738.pdf - // Technically we could use textureNumLevels(specular_environment_map) - 1 here, but we use a uniform - // because textureNumLevels() does not work on WebGL2 - let radiance_level = perceptual_roughness * f32(light_probes.smallest_specular_mip_level_for_view); - - let intensity = light_probes.intensity_for_view; + // view matrix z_axis is the reverse of transform.forward() + let fwd = -spot_dir; + let distance_to_light = dot(fwd, surface_to_light); + let offset_position = + -surface_to_light + + ((*light).shadow_depth_bias * normalize(surface_to_light)) + + (surface_normal.xyz * (*light).shadow_normal_bias) * distance_to_light; - if (!found_diffuse_indirect) { - radiances.irradiance = textureSampleLevel( - bindings::diffuse_environment_map, - bindings::environment_map_sampler, - vec3(N.xy, -N.z), - 0.0).rgb * intensity; + // the construction of the up and right vectors needs to precisely mirror the code + // in render/light.rs:spot_light_view_matrix + var sign = -1.0; + if (fwd.z >= 0.0) { + sign = 1.0; } + let a = -1.0 / (fwd.z + sign); + let b = fwd.x * fwd.y * a; + let up_dir = vec3(1.0 + sign * fwd.x * fwd.x * a, sign * b, -sign * fwd.x); + let right_dir = vec3(-b, -sign - fwd.y * fwd.y * a, fwd.y); + let light_inv_rot = mat3x3(right_dir, up_dir, fwd); - radiances.radiance = textureSampleLevel( - bindings::specular_environment_map, - bindings::environment_map_sampler, - vec3(R.xy, -R.z), - radiance_level).rgb * intensity; - - return radiances; -} + // because the matrix is a pure rotation matrix, the inverse is just the transpose, and to calculate + // the product of the transpose with a vector we can just post-multiply instead of pre-multiplying. + // this allows us to keep the matrix construction code identical between CPU and GPU. 
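+    // (Post-multiplying, offset_position * light_inv_rot, is equivalent to
+    // transpose(light_inv_rot) * offset_position.)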
+ let projected_position = offset_position * light_inv_rot; -#endif // MULTIPLE_LIGHT_PROBES_IN_ARRAY + // divide xy by perspective matrix "f" and by -projected.z (projected.z is -projection matrix's w) + // to get ndc coordinates + let f_div_minus_z = 1.0 / ((*light).spot_light_tan_angle * -projected_position.z); + let shadow_xy_ndc = projected_position.xy * f_div_minus_z; + // convert to uv coordinates + let shadow_uv = shadow_xy_ndc * vec2(0.5, -0.5) + vec2(0.5, 0.5); -fn environment_map_light( - perceptual_roughness: f32, - roughness: f32, - diffuse_color: vec3, - NdotV: f32, - f_ab: vec2, - N: vec3, - R: vec3, - F0: vec3, - world_position: vec3, - found_diffuse_indirect: bool, -) -> EnvironmentMapLight { - var out: EnvironmentMapLight; + // 0.1 must match POINT_LIGHT_NEAR_Z + let depth = 0.1 / -projected_position.z; - let radiances = compute_radiances( - perceptual_roughness, - N, - R, - world_position, - found_diffuse_indirect); - if (all(radiances.irradiance == vec3(0.0)) && all(radiances.radiance == vec3(0.0))) { - out.diffuse = vec3(0.0); - out.specular = vec3(0.0); - return out; - } + return sample_shadow_map( + shadow_uv, + depth, + i32(light_id) + view_bindings::lights.spot_light_shadowmap_offset, + SPOT_SHADOW_TEXEL_SIZE + ); +} - // No real world material has specular values under 0.02, so we use this range as a - // "pre-baked specular occlusion" that extinguishes the fresnel term, for artistic control. - // See: https://google.github.io/filament/Filament.html#specularocclusion - let specular_occlusion = saturate(dot(F0, vec3(50.0 * 0.33))); +fn get_cascade_index(light_id: u32, view_z: f32) -> u32 { + let light = &view_bindings::lights.directional_lights[light_id]; - // Multiscattering approximation: https://www.jcgt.org/published/0008/01/03/paper.pdf - // Useful reference: https://bruop.github.io/ibl - let Fr = max(vec3(1.0 - roughness), F0) - F0; - let kS = F0 + Fr * pow(1.0 - NdotV, 5.0); - let Ess = f_ab.x + f_ab.y; - let FssEss = kS * Ess * specular_occlusion; - let Ems = 1.0 - Ess; - let Favg = F0 + (1.0 - F0) / 21.0; - let Fms = FssEss * Favg / (1.0 - Ems * Favg); - let FmsEms = Fms * Ems; - let Edss = 1.0 - (FssEss + FmsEms); - let kD = diffuse_color * Edss; + for (var i: u32 = 0u; i < (*light).num_cascades; i = i + 1u) { + if (-view_z < (*light).cascades[i].far_bound) { + return i; + } + } + return (*light).num_cascades; +} +// Converts from world space to the uv position in the light's shadow map. +// +// The depth is stored in the return value's z coordinate. If the return value's +// w coordinate is 0.0, then we landed outside the shadow map entirely. 
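+// A zero w covers both fragments behind the light (clip-space w <= 0) and
+// fragments outside the cascade's orthographic volume in NDC.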
+fn world_to_directional_light_local( + light_id: u32, + cascade_index: u32, + offset_position: vec4 +) -> vec4 { + let light = &view_bindings::lights.directional_lights[light_id]; + let cascade = &(*light).cascades[cascade_index]; - if (!found_diffuse_indirect) { - out.diffuse = (FmsEms + kD) * radiances.irradiance; - } else { - out.diffuse = vec3(0.0); + let offset_position_clip = (*cascade).clip_from_world * offset_position; + if (offset_position_clip.w <= 0.0) { + return vec4(0.0); + } + let offset_position_ndc = offset_position_clip.xyz / offset_position_clip.w; + // No shadow outside the orthographic projection volume + if (any(offset_position_ndc.xy < vec2(-1.0)) || offset_position_ndc.z < 0.0 + || any(offset_position_ndc > vec3(1.0))) { + return vec4(0.0); } - out.specular = FssEss * radiances.radiance; - return out; -} + // compute texture coordinates for shadow lookup, compensating for the Y-flip difference + // between the NDC and texture coordinates + let flip_correction = vec2(0.5, -0.5); + let light_local = offset_position_ndc.xy * flip_correction + vec2(0.5, 0.5); -``` + let depth = offset_position_ndc.z; -### crates/bevy_pbr/src/light_probe/light_probe + return vec4(light_local, depth, 1.0); +} -```rust -#define_import_path bevy_pbr::light_probe +fn sample_directional_cascade(light_id: u32, cascade_index: u32, frag_position: vec4, surface_normal: vec3) -> f32 { + let light = &view_bindings::lights.directional_lights[light_id]; + let cascade = &(*light).cascades[cascade_index]; -#import bevy_pbr::mesh_view_bindings::light_probes -#import bevy_pbr::mesh_view_types::LightProbe + // The normal bias is scaled to the texel size. + let normal_offset = (*light).shadow_normal_bias * (*cascade).texel_size * surface_normal.xyz; + let depth_offset = (*light).shadow_depth_bias * (*light).direction_to_light.xyz; + let offset_position = vec4(frag_position.xyz + normal_offset + depth_offset, frag_position.w); -// The result of searching for a light probe. -struct LightProbeQueryResult { - // The index of the light probe texture or textures in the binding array or - // arrays. - texture_index: i32, - // A scale factor that's applied to the diffuse and specular light from the - // light probe. This is in units of cd/m² (candela per square meter). - intensity: f32, - // Transform from world space to the light probe model space. In light probe - // model space, the light probe is a 1×1×1 cube centered on the origin. - inverse_transform: mat4x4, -}; + let light_local = world_to_directional_light_local(light_id, cascade_index, offset_position); + if (light_local.w == 0.0) { + return 1.0; + } -fn transpose_affine_matrix(matrix: mat3x4) -> mat4x4 { - let matrix4x4 = mat4x4( - matrix[0], - matrix[1], - matrix[2], - vec4(0.0, 0.0, 0.0, 1.0)); - return transpose(matrix4x4); + let array_index = i32((*light).depth_texture_base_index + cascade_index); + return sample_shadow_map(light_local.xy, light_local.z, array_index, (*cascade).texel_size); } -// Searches for a light probe that contains the fragment. -// -// TODO: Interpolate between multiple light probes. 
-fn query_light_probe( - world_position: vec3, - is_irradiance_volume: bool, -) -> LightProbeQueryResult { - var result: LightProbeQueryResult; - result.texture_index = -1; +fn fetch_directional_shadow(light_id: u32, frag_position: vec4, surface_normal: vec3, view_z: f32) -> f32 { + let light = &view_bindings::lights.directional_lights[light_id]; + let cascade_index = get_cascade_index(light_id, view_z); - var light_probe_count: i32; - if is_irradiance_volume { - light_probe_count = light_probes.irradiance_volume_count; - } else { - light_probe_count = light_probes.reflection_probe_count; + if (cascade_index >= (*light).num_cascades) { + return 1.0; } - for (var light_probe_index: i32 = 0; - light_probe_index < light_probe_count && result.texture_index < 0; - light_probe_index += 1) { - var light_probe: LightProbe; - if is_irradiance_volume { - light_probe = light_probes.irradiance_volumes[light_probe_index]; - } else { - light_probe = light_probes.reflection_probes[light_probe_index]; - } - - // Unpack the inverse transform. - let inverse_transform = - transpose_affine_matrix(light_probe.inverse_transpose_transform); - - // Check to see if the transformed point is inside the unit cube - // centered at the origin. - let probe_space_pos = (inverse_transform * vec4(world_position, 1.0f)).xyz; - if (all(abs(probe_space_pos) <= vec3(0.5f))) { - result.texture_index = light_probe.cubemap_index; - result.intensity = light_probe.intensity; - result.inverse_transform = inverse_transform; + var shadow = sample_directional_cascade(light_id, cascade_index, frag_position, surface_normal); - // TODO: Workaround for ICE in DXC https://github.com/microsoft/DirectXShaderCompiler/issues/6183 - // We can't use `break` here because of the ICE. - // So instead we rely on the fact that we set `result.texture_index` - // above and check its value in the `for` loop header before - // looping. - // break; + // Blend with the next cascade, if there is one. 
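+    // The blend factor ramps linearly from 0 where the overlap region begins
+    // (next_near_bound) to 1 at this cascade's far bound.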
+ let next_cascade_index = cascade_index + 1u; + if (next_cascade_index < (*light).num_cascades) { + let this_far_bound = (*light).cascades[cascade_index].far_bound; + let next_near_bound = (1.0 - (*light).cascades_overlap_proportion) * this_far_bound; + if (-view_z >= next_near_bound) { + let next_shadow = sample_directional_cascade(light_id, next_cascade_index, frag_position, surface_normal); + shadow = mix(shadow, next_shadow, (-view_z - next_near_bound) / (this_far_bound - next_near_bound)); } } - - return result; + return shadow; } +fn cascade_debug_visualization( + output_color: vec3, + light_id: u32, + view_z: f32, +) -> vec3 { + let overlay_alpha = 0.95; + let cascade_index = get_cascade_index(light_id, view_z); + let cascade_color = hsv_to_rgb( + f32(cascade_index) / f32(#{MAX_CASCADES_PER_LIGHT}u + 1u) * PI_2, + 1.0, + 0.5 + ); + return vec3( + (1.0 - overlay_alpha) * output_color.rgb + overlay_alpha * cascade_color + ); +} ``` -### crates/bevy_pbr/src/light_probe/irradiance_volume +### bevy_shaders/downsample_depth ```rust -#define_import_path bevy_pbr::irradiance_volume - -#import bevy_pbr::light_probe::query_light_probe -#import bevy_pbr::mesh_view_bindings::{ - irradiance_volumes, - irradiance_volume, - irradiance_volume_sampler, - light_probes, -}; +@group(0) @binding(0) var mip_0: texture_depth_2d; +@group(0) @binding(1) var mip_1: texture_storage_2d; +@group(0) @binding(2) var mip_2: texture_storage_2d; +@group(0) @binding(3) var mip_3: texture_storage_2d; +@group(0) @binding(4) var mip_4: texture_storage_2d; +@group(0) @binding(5) var mip_5: texture_storage_2d; +@group(0) @binding(6) var mip_6: texture_storage_2d; +@group(0) @binding(7) var mip_7: texture_storage_2d; +@group(0) @binding(8) var mip_8: texture_storage_2d; +@group(0) @binding(9) var mip_9: texture_storage_2d; +@group(0) @binding(10) var mip_10: texture_storage_2d; +@group(0) @binding(11) var mip_11: texture_storage_2d; +@group(0) @binding(12) var mip_12: texture_storage_2d; +@group(0) @binding(13) var samplr: sampler; +var max_mip_level: u32; + +/// Generates a hierarchical depth buffer. +/// Based on FidelityFX SPD v2.1 https://github.com/GPUOpen-LibrariesAndSDKs/FidelityFX-SDK/blob/d7531ae47d8b36a5d4025663e731a47a38be882f/sdk/include/FidelityFX/gpu/spd/ffx_spd.h#L528 + +var intermediate_memory: array, 16>; -#ifdef IRRADIANCE_VOLUMES_ARE_USABLE +@compute +@workgroup_size(256, 1, 1) +fn downsample_depth_first( + @builtin(num_workgroups) num_workgroups: vec3u, + @builtin(workgroup_id) workgroup_id: vec3u, + @builtin(local_invocation_index) local_invocation_index: u32, +) { + let sub_xy = remap_for_wave_reduction(local_invocation_index % 64u); + let x = sub_xy.x + 8u * ((local_invocation_index >> 6u) % 2u); + let y = sub_xy.y + 8u * (local_invocation_index >> 7u); -// See: -// https://advances.realtimerendering.com/s2006/Mitchell-ShadingInValvesSourceEngine.pdf -// Slide 28, "Ambient Cube Basis" -fn irradiance_volume_light(world_position: vec3, N: vec3) -> vec3 { - // Search for an irradiance volume that contains the fragment. - let query_result = query_light_probe(world_position, /*is_irradiance_volume=*/ true); + downsample_mips_0_and_1(x, y, workgroup_id.xy, local_invocation_index); - // If there was no irradiance volume found, bail out. 
- if (query_result.texture_index < 0) { - return vec3(0.0f); - } + downsample_mips_2_to_5(x, y, workgroup_id.xy, local_invocation_index); +} -#ifdef MULTIPLE_LIGHT_PROBES_IN_ARRAY - let irradiance_volume_texture = irradiance_volumes[query_result.texture_index]; -#else - let irradiance_volume_texture = irradiance_volume; -#endif +@compute +@workgroup_size(256, 1, 1) +fn downsample_depth_second(@builtin(local_invocation_index) local_invocation_index: u32) { + let sub_xy = remap_for_wave_reduction(local_invocation_index % 64u); + let x = sub_xy.x + 8u * ((local_invocation_index >> 6u) % 2u); + let y = sub_xy.y + 8u * (local_invocation_index >> 7u); + + downsample_mips_6_and_7(x, y); + + downsample_mips_8_to_11(x, y, local_invocation_index); +} + +fn downsample_mips_0_and_1(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + var v: vec4f; + + var tex = vec2(workgroup_id * 64u) + vec2(x * 2u, y * 2u); + var pix = vec2(workgroup_id * 32u) + vec2(x, y); + v[0] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[0])); + + tex = vec2(workgroup_id * 64u) + vec2(x * 2u + 32u, y * 2u); + pix = vec2(workgroup_id * 32u) + vec2(x + 16u, y); + v[1] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[1])); + + tex = vec2(workgroup_id * 64u) + vec2(x * 2u, y * 2u + 32u); + pix = vec2(workgroup_id * 32u) + vec2(x, y + 16u); + v[2] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[2])); + + tex = vec2(workgroup_id * 64u) + vec2(x * 2u + 32u, y * 2u + 32u); + pix = vec2(workgroup_id * 32u) + vec2(x + 16u, y + 16u); + v[3] = reduce_load_mip_0(tex); + textureStore(mip_1, pix, vec4(v[3])); + + if max_mip_level <= 1u { return; } + + for (var i = 0u; i < 4u; i++) { + intermediate_memory[x][y] = v[i]; + workgroupBarrier(); + if local_invocation_index < 64u { + v[i] = reduce_4(vec4( + intermediate_memory[x * 2u + 0u][y * 2u + 0u], + intermediate_memory[x * 2u + 1u][y * 2u + 0u], + intermediate_memory[x * 2u + 0u][y * 2u + 1u], + intermediate_memory[x * 2u + 1u][y * 2u + 1u], + )); + pix = (workgroup_id * 16u) + vec2( + x + (i % 2u) * 8u, + y + (i / 2u) * 8u, + ); + textureStore(mip_2, pix, vec4(v[i])); + } + workgroupBarrier(); + } - let atlas_resolution = vec3(textureDimensions(irradiance_volume_texture)); - let resolution = vec3(textureDimensions(irradiance_volume_texture) / vec3(1u, 2u, 3u)); + if local_invocation_index < 64u { + intermediate_memory[x + 0u][y + 0u] = v[0]; + intermediate_memory[x + 8u][y + 0u] = v[1]; + intermediate_memory[x + 0u][y + 8u] = v[2]; + intermediate_memory[x + 8u][y + 8u] = v[3]; + } +} - // Make sure to clamp to the edges to avoid texture bleed. - var unit_pos = (query_result.inverse_transform * vec4(world_position, 1.0f)).xyz; - let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f)); - let uvw = stp / atlas_resolution; +fn downsample_mips_2_to_5(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if max_mip_level <= 2u { return; } + workgroupBarrier(); + downsample_mip_2(x, y, workgroup_id, local_invocation_index); - // The bottom half of each cube slice is the negative part, so choose it if applicable on each - // slice. 
- let neg_offset = select(vec3(0.0f), vec3(0.5f), N < vec3(0.0f)); + if max_mip_level <= 3u { return; } + workgroupBarrier(); + downsample_mip_3(x, y, workgroup_id, local_invocation_index); - let uvw_x = uvw + vec3(0.0f, neg_offset.x, 0.0f); - let uvw_y = uvw + vec3(0.0f, neg_offset.y, 1.0f / 3.0f); - let uvw_z = uvw + vec3(0.0f, neg_offset.z, 2.0f / 3.0f); + if max_mip_level <= 4u { return; } + workgroupBarrier(); + downsample_mip_4(x, y, workgroup_id, local_invocation_index); - let rgb_x = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_x, 0.0).rgb; - let rgb_y = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_y, 0.0).rgb; - let rgb_z = textureSampleLevel(irradiance_volume_texture, irradiance_volume_sampler, uvw_z, 0.0).rgb; + if max_mip_level <= 5u { return; } + workgroupBarrier(); + downsample_mip_5(workgroup_id, local_invocation_index); +} + +fn downsample_mip_2(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 64u { + let v = reduce_4(vec4( + intermediate_memory[x * 2u + 0u][y * 2u + 0u], + intermediate_memory[x * 2u + 1u][y * 2u + 0u], + intermediate_memory[x * 2u + 0u][y * 2u + 1u], + intermediate_memory[x * 2u + 1u][y * 2u + 1u], + )); + textureStore(mip_3, (workgroup_id * 8u) + vec2(x, y), vec4(v)); + intermediate_memory[x * 2u + y % 2u][y * 2u] = v; + } +} - // Use Valve's formula to sample. - let NN = N * N; - return (rgb_x * NN.x + rgb_y * NN.y + rgb_z * NN.z) * query_result.intensity; +fn downsample_mip_3(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 16u { + let v = reduce_4(vec4( + intermediate_memory[x * 4u + 0u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 2u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 0u + 1u][y * 4u + 2u], + intermediate_memory[x * 4u + 2u + 1u][y * 4u + 2u], + )); + textureStore(mip_4, (workgroup_id * 4u) + vec2(x, y), vec4(v)); + intermediate_memory[x * 4u + y][y * 4u] = v; + } } -#endif // IRRADIANCE_VOLUMES_ARE_USABLE +fn downsample_mip_4(x: u32, y: u32, workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 4u { + let v = reduce_4(vec4( + intermediate_memory[x * 8u + 0u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 4u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 0u + 1u + y * 2u][y * 8u + 4u], + intermediate_memory[x * 8u + 4u + 1u + y * 2u][y * 8u + 4u], + )); + textureStore(mip_5, (workgroup_id * 2u) + vec2(x, y), vec4(v)); + intermediate_memory[x + y * 2u][0u] = v; + } +} -``` +fn downsample_mip_5(workgroup_id: vec2u, local_invocation_index: u32) { + if local_invocation_index < 1u { + let v = reduce_4(vec4( + intermediate_memory[0u][0u], + intermediate_memory[1u][0u], + intermediate_memory[2u][0u], + intermediate_memory[3u][0u], + )); + textureStore(mip_6, workgroup_id, vec4(v)); + } +} -### crates/bevy_pbr/src/lightmap/lightmap +fn downsample_mips_6_and_7(x: u32, y: u32) { + var v: vec4f; -```rust -#define_import_path bevy_pbr::lightmap + var tex = vec2(x * 4u + 0u, y * 4u + 0u); + var pix = vec2(x * 2u + 0u, y * 2u + 0u); + v[0] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[0])); -#import bevy_pbr::mesh_bindings::mesh + tex = vec2(x * 4u + 2u, y * 4u + 0u); + pix = vec2(x * 2u + 1u, y * 2u + 0u); + v[1] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[1])); -@group(1) @binding(4) var lightmaps_texture: texture_2d; -@group(1) @binding(5) var lightmaps_sampler: sampler; + tex = vec2(x * 4u + 0u, y * 
4u + 2u); + pix = vec2(x * 2u + 0u, y * 2u + 1u); + v[2] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[2])); -// Samples the lightmap, if any, and returns indirect illumination from it. -fn lightmap(uv: vec2, exposure: f32, instance_index: u32) -> vec3 { - let packed_uv_rect = mesh[instance_index].lightmap_uv_rect; - let uv_rect = vec4(vec4( - packed_uv_rect.x & 0xffffu, - packed_uv_rect.x >> 16u, - packed_uv_rect.y & 0xffffu, - packed_uv_rect.y >> 16u)) / 65535.0; + tex = vec2(x * 4u + 2u, y * 4u + 2u); + pix = vec2(x * 2u + 1u, y * 2u + 1u); + v[3] = reduce_load_mip_6(tex); + textureStore(mip_7, pix, vec4(v[3])); - let lightmap_uv = mix(uv_rect.xy, uv_rect.zw, uv); + if max_mip_level <= 7u { return; } - // Mipmapping lightmaps is usually a bad idea due to leaking across UV - // islands, so there's no harm in using mip level 0 and it lets us avoid - // control flow uniformity problems. - // - // TODO(pcwalton): Consider bicubic filtering. - return textureSampleLevel( - lightmaps_texture, - lightmaps_sampler, - lightmap_uv, - 0.0).rgb * exposure; + let vr = reduce_4(v); + textureStore(mip_8, vec2(x, y), vec4(vr)); + intermediate_memory[x][y] = vr; } -``` +fn downsample_mips_8_to_11(x: u32, y: u32, local_invocation_index: u32) { + if max_mip_level <= 8u { return; } + workgroupBarrier(); + downsample_mip_8(x, y, local_invocation_index); -### crates/bevy_pbr/src/ssao/spatial_denoise + if max_mip_level <= 9u { return; } + workgroupBarrier(); + downsample_mip_9(x, y, local_invocation_index); -```rust -// 3x3 bilaterial filter (edge-preserving blur) -// https://people.csail.mit.edu/sparis/bf_course/course_notes.pdf + if max_mip_level <= 10u { return; } + workgroupBarrier(); + downsample_mip_10(x, y, local_invocation_index); -// Note: Does not use the Gaussian kernel part of a typical bilateral blur -// From the paper: "use the information gathered on a neighborhood of 4 × 4 using a bilateral filter for -// reconstruction, using _uniform_ convolution weights" + if max_mip_level <= 11u { return; } + workgroupBarrier(); + downsample_mip_11(local_invocation_index); +} + +fn downsample_mip_8(x: u32, y: u32, local_invocation_index: u32) { + if local_invocation_index < 64u { + let v = reduce_4(vec4( + intermediate_memory[x * 2u + 0u][y * 2u + 0u], + intermediate_memory[x * 2u + 1u][y * 2u + 0u], + intermediate_memory[x * 2u + 0u][y * 2u + 1u], + intermediate_memory[x * 2u + 1u][y * 2u + 1u], + )); + textureStore(mip_9, vec2(x, y), vec4(v)); + intermediate_memory[x * 2u + y % 2u][y * 2u] = v; + } +} -// Note: The paper does a 4x4 (not quite centered) filter, offset by +/- 1 pixel every other frame -// XeGTAO does a 3x3 filter, on two pixels at a time per compute thread, applied twice -// We do a 3x3 filter, on 1 pixel per compute thread, applied once +fn downsample_mip_9(x: u32, y: u32, local_invocation_index: u32) { + if local_invocation_index < 16u { + let v = reduce_4(vec4( + intermediate_memory[x * 4u + 0u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 2u + 0u][y * 4u + 0u], + intermediate_memory[x * 4u + 0u + 1u][y * 4u + 2u], + intermediate_memory[x * 4u + 2u + 1u][y * 4u + 2u], + )); + textureStore(mip_10, vec2(x, y), vec4(v)); + intermediate_memory[x * 4u + y][y * 4u] = v; + } +} -#import bevy_render::view::View +fn downsample_mip_10(x: u32, y: u32, local_invocation_index: u32) { + if local_invocation_index < 4u { + let v = reduce_4(vec4( + intermediate_memory[x * 8u + 0u + 0u + y * 2u][y * 8u + 0u], + intermediate_memory[x * 8u + 4u + 0u + y * 2u][y * 8u + 0u], + 
intermediate_memory[x * 8u + 0u + 1u + y * 2u][y * 8u + 4u], + intermediate_memory[x * 8u + 4u + 1u + y * 2u][y * 8u + 4u], + )); + textureStore(mip_11, vec2(x, y), vec4(v)); + intermediate_memory[x + y * 2u][0u] = v; + } +} -@group(0) @binding(0) var ambient_occlusion_noisy: texture_2d; -@group(0) @binding(1) var depth_differences: texture_2d; -@group(0) @binding(2) var ambient_occlusion: texture_storage_2d; -@group(1) @binding(0) var point_clamp_sampler: sampler; -@group(1) @binding(1) var view: View; +fn downsample_mip_11(local_invocation_index: u32) { + if local_invocation_index < 1u { + let v = reduce_4(vec4( + intermediate_memory[0u][0u], + intermediate_memory[1u][0u], + intermediate_memory[2u][0u], + intermediate_memory[3u][0u], + )); + textureStore(mip_12, vec2(0u, 0u), vec4(v)); + } +} -@compute -@workgroup_size(8, 8, 1) -fn spatial_denoise(@builtin(global_invocation_id) global_id: vec3) { - let pixel_coordinates = vec2(global_id.xy); - let uv = vec2(pixel_coordinates) / view.viewport.zw; +fn remap_for_wave_reduction(a: u32) -> vec2u { + return vec2( + insertBits(extractBits(a, 2u, 3u), a, 0u, 1u), + insertBits(extractBits(a, 3u, 3u), extractBits(a, 1u, 2u), 0u, 2u), + ); +} - let edges0 = textureGather(0, depth_differences, point_clamp_sampler, uv); - let edges1 = textureGather(0, depth_differences, point_clamp_sampler, uv, vec2(2i, 0i)); - let edges2 = textureGather(0, depth_differences, point_clamp_sampler, uv, vec2(1i, 2i)); - let visibility0 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv); - let visibility1 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(2i, 0i)); - let visibility2 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(0i, 2i)); - let visibility3 = textureGather(0, ambient_occlusion_noisy, point_clamp_sampler, uv, vec2(2i, 2i)); +fn reduce_load_mip_0(tex: vec2u) -> f32 { + let uv = (vec2f(tex) + 0.5) / vec2f(textureDimensions(mip_0)); + return reduce_4(textureGather(mip_0, samplr, uv)); +} - let left_edges = unpack4x8unorm(edges0.x); - let right_edges = unpack4x8unorm(edges1.x); - let top_edges = unpack4x8unorm(edges0.z); - let bottom_edges = unpack4x8unorm(edges2.w); - var center_edges = unpack4x8unorm(edges0.y); - center_edges *= vec4(left_edges.y, right_edges.x, top_edges.w, bottom_edges.z); +fn reduce_load_mip_6(tex: vec2u) -> f32 { + return reduce_4(vec4( + textureLoad(mip_6, tex + vec2(0u, 0u)).r, + textureLoad(mip_6, tex + vec2(0u, 1u)).r, + textureLoad(mip_6, tex + vec2(1u, 0u)).r, + textureLoad(mip_6, tex + vec2(1u, 1u)).r, + )); +} - let center_weight = 1.2; - let left_weight = center_edges.x; - let right_weight = center_edges.y; - let top_weight = center_edges.z; - let bottom_weight = center_edges.w; - let top_left_weight = 0.425 * (top_weight * top_edges.x + left_weight * left_edges.z); - let top_right_weight = 0.425 * (top_weight * top_edges.y + right_weight * right_edges.z); - let bottom_left_weight = 0.425 * (bottom_weight * bottom_edges.x + left_weight * left_edges.w); - let bottom_right_weight = 0.425 * (bottom_weight * bottom_edges.y + right_weight * right_edges.w); +fn reduce_4(v: vec4f) -> f32 { + return min(min(v.x, v.y), min(v.z, v.w)); +} - let center_visibility = visibility0.y; - let left_visibility = visibility0.x; - let right_visibility = visibility0.z; - let top_visibility = visibility1.x; - let bottom_visibility = visibility2.z; - let top_left_visibility = visibility0.w; - let top_right_visibility = visibility1.w; - let bottom_left_visibility = visibility2.w; - let 
bottom_right_visibility = visibility3.w; +``` - var sum = center_visibility; - sum += left_visibility * left_weight; - sum += right_visibility * right_weight; - sum += top_visibility * top_weight; - sum += bottom_visibility * bottom_weight; - sum += top_left_visibility * top_left_weight; - sum += top_right_visibility * top_right_weight; - sum += bottom_left_visibility * bottom_left_weight; - sum += bottom_right_visibility * bottom_right_weight; +### bevy_shaders/custom_material_screenspace_texture - var sum_weight = center_weight; - sum_weight += left_weight; - sum_weight += right_weight; - sum_weight += top_weight; - sum_weight += bottom_weight; - sum_weight += top_left_weight; - sum_weight += top_right_weight; - sum_weight += bottom_left_weight; - sum_weight += bottom_right_weight; +```rust +#import bevy_pbr::{ + mesh_view_bindings::view, + forward_io::VertexOutput, + utils::coords_to_viewport_uv, +} - let denoised_visibility = sum / sum_weight; +@group(2) @binding(0) var texture: texture_2d; +@group(2) @binding(1) var texture_sampler: sampler; - textureStore(ambient_occlusion, pixel_coordinates, vec4(denoised_visibility, 0.0, 0.0, 0.0)); +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + let viewport_uv = coords_to_viewport_uv(mesh.position.xy, view.viewport); + let color = textureSample(texture, texture_sampler, viewport_uv); + return color; } ``` -### crates/bevy_pbr/src/ssao/gtao +### bevy_shaders/auto_exposure ```rust -// Ground Truth-based Ambient Occlusion (GTAO) -// Paper: https://www.activision.com/cdn/research/Practical_Real_Time_Strategies_for_Accurate_Indirect_Occlusion_NEW%20VERSION_COLOR.pdf -// Presentation: https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf +// Auto exposure +// +// This shader computes an auto exposure value for the current frame, +// which is then used as an exposure correction in the tone mapping shader. +// +// The auto exposure value is computed in two passes: +// * The compute_histogram pass calculates a histogram of the luminance values in the scene, +// taking into account the metering mask texture. The metering mask is a grayscale texture +// that defines the areas of the screen that should be given more weight when calculating +// the average luminance value. For example, the middle area of the screen might be more important +// than the edges. +// * The compute_average pass calculates the average luminance value of the scene, taking +// into account the low_percent and high_percent settings. These settings define the +// percentage of the histogram that should be excluded when calculating the average. This +// is useful to avoid overexposure when you have a lot of shadows, or underexposure when you +// have a lot of bright specular reflections. +// +// The final target_exposure is finally used to smoothly adjust the exposure value over time. -// Source code heavily based on XeGTAO v1.30 from Intel -// https://github.com/GameTechDev/XeGTAO/blob/0d177ce06bfa642f64d8af4de1197ad1bcb862d4/Source/Rendering/Shaders/XeGTAO.hlsli +#import bevy_render::view::View +#import bevy_render::globals::Globals -#import bevy_pbr::{ - gtao_utils::fast_acos, - utils::{PI, HALF_PI}, +// Constant to convert RGB to luminance, taken from Real Time Rendering, Vol 4 pg. 
278, 4th edition +const RGB_TO_LUM = vec3(0.2125, 0.7154, 0.0721); + +struct AutoExposure { + min_log_lum: f32, + inv_log_lum_range: f32, + log_lum_range: f32, + low_percent: f32, + high_percent: f32, + speed_up: f32, + speed_down: f32, + exponential_transition_distance: f32, } -#import bevy_render::{ - view::View, - globals::Globals, + +struct CompensationCurve { + min_log_lum: f32, + inv_log_lum_range: f32, + min_compensation: f32, + compensation_range: f32, } -@group(0) @binding(0) var preprocessed_depth: texture_2d; -@group(0) @binding(1) var normals: texture_2d; -@group(0) @binding(2) var hilbert_index_lut: texture_2d; -@group(0) @binding(3) var ambient_occlusion: texture_storage_2d; -@group(0) @binding(4) var depth_differences: texture_storage_2d; -@group(0) @binding(5) var globals: Globals; -@group(1) @binding(0) var point_clamp_sampler: sampler; -@group(1) @binding(1) var view: View; +@group(0) @binding(0) var globals: Globals; -fn load_noise(pixel_coordinates: vec2) -> vec2 { - var index = textureLoad(hilbert_index_lut, pixel_coordinates % 64, 0).r; +@group(0) @binding(1) var settings: AutoExposure; -#ifdef TEMPORAL_JITTER - index += 288u * (globals.frame_count % 64u); -#endif +@group(0) @binding(2) var tex_color: texture_2d; - // R2 sequence - http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences - return fract(0.5 + f32(index) * vec2(0.75487766624669276005, 0.5698402909980532659114)); -} +@group(0) @binding(3) var tex_mask: texture_2d; -// Calculate differences in depth between neighbor pixels (later used by the spatial denoiser pass to preserve object edges) -fn calculate_neighboring_depth_differences(pixel_coordinates: vec2) -> f32 { - // Sample the pixel's depth and 4 depths around it - let uv = vec2(pixel_coordinates) / view.viewport.zw; - let depths_upper_left = textureGather(0, preprocessed_depth, point_clamp_sampler, uv); - let depths_bottom_right = textureGather(0, preprocessed_depth, point_clamp_sampler, uv, vec2(1i, 1i)); - let depth_center = depths_upper_left.y; - let depth_left = depths_upper_left.x; - let depth_top = depths_upper_left.z; - let depth_bottom = depths_bottom_right.x; - let depth_right = depths_bottom_right.z; +@group(0) @binding(4) var tex_compensation: texture_1d; - // Calculate the depth differences (large differences represent object edges) - var edge_info = vec4(depth_left, depth_right, depth_top, depth_bottom) - depth_center; - let slope_left_right = (edge_info.y - edge_info.x) * 0.5; - let slope_top_bottom = (edge_info.w - edge_info.z) * 0.5; - let edge_info_slope_adjusted = edge_info + vec4(slope_left_right, -slope_left_right, slope_top_bottom, -slope_top_bottom); - edge_info = min(abs(edge_info), abs(edge_info_slope_adjusted)); - let bias = 0.25; // Using the bias and then saturating nudges the values a bit - let scale = depth_center * 0.011; // Weight the edges by their distance from the camera - edge_info = saturate((1.0 + bias) - edge_info / scale); // Apply the bias and scale, and invert edge_info so that small values become large, and vice versa +@group(0) @binding(5) var compensation_curve: CompensationCurve; - // Pack the edge info into the texture - let edge_info_packed = vec4(pack4x8unorm(edge_info), 0u, 0u, 0u); - textureStore(depth_differences, pixel_coordinates, edge_info_packed); +@group(0) @binding(6) var histogram: array, 64>; - return depth_center; -} +@group(0) @binding(7) var exposure: f32; -fn load_normal_view_space(uv: vec2) -> vec3 { - var world_normal = textureSampleLevel(normals, 
point_clamp_sampler, uv, 0.0).xyz; - world_normal = (world_normal * 2.0) - 1.0; - let inverse_view = mat3x3( - view.inverse_view[0].xyz, - view.inverse_view[1].xyz, - view.inverse_view[2].xyz, - ); - return inverse_view * world_normal; -} +@group(0) @binding(8) var view: View; -fn reconstruct_view_space_position(depth: f32, uv: vec2) -> vec3 { - let clip_xy = vec2(uv.x * 2.0 - 1.0, 1.0 - 2.0 * uv.y); - let t = view.inverse_projection * vec4(clip_xy, depth, 1.0); - let view_xyz = t.xyz / t.w; - return view_xyz; -} +var histogram_shared: array, 64>; -fn load_and_reconstruct_view_space_position(uv: vec2, sample_mip_level: f32) -> vec3 { - let depth = textureSampleLevel(preprocessed_depth, point_clamp_sampler, uv, sample_mip_level).r; - return reconstruct_view_space_position(depth, uv); +// For a given color, return the histogram bin index +fn color_to_bin(hdr: vec3) -> u32 { + // Convert color to luminance + let lum = dot(hdr, RGB_TO_LUM); + + if lum < exp2(settings.min_log_lum) { + return 0u; + } + + // Calculate the log_2 luminance and express it as a value in [0.0, 1.0] + // where 0.0 represents the minimum luminance, and 1.0 represents the max. + let log_lum = saturate((log2(lum) - settings.min_log_lum) * settings.inv_log_lum_range); + + // Map [0, 1] to [1, 63]. The zeroth bin is handled by the epsilon check above. + return u32(log_lum * 62.0 + 1.0); } -@compute -@workgroup_size(8, 8, 1) -fn gtao(@builtin(global_invocation_id) global_id: vec3) { - let slice_count = f32(#SLICE_COUNT); - let samples_per_slice_side = f32(#SAMPLES_PER_SLICE_SIDE); - let effect_radius = 0.5 * 1.457; - let falloff_range = 0.615 * effect_radius; - let falloff_from = effect_radius * (1.0 - 0.615); - let falloff_mul = -1.0 / falloff_range; - let falloff_add = falloff_from / falloff_range + 1.0; +// Read the metering mask at the given UV coordinates, returning a weight for the histogram. +// +// Since the histogram is summed in the compute_average step, there is a limit to the amount of +// distinct values that can be represented. When using the chosen value of 16, the maximum +// amount of pixels that can be weighted and summed is 2^32 / 16 = 16384^2. 
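A CPU-side sketch of the binning and mask-weighting arithmetic described above, in Rust (the function names and example settings here are illustrative, not Bevy's API):

```rust
/// Sketch of the histogram binning used by compute_histogram.
/// `min_log_lum` and `inv_log_lum_range` play the role of the AutoExposure settings.
fn color_to_bin(rgb: [f32; 3], min_log_lum: f32, inv_log_lum_range: f32) -> u32 {
    // Same luminance weights as RGB_TO_LUM above.
    let lum = 0.2125 * rgb[0] + 0.7154 * rgb[1] + 0.0721 * rgb[2];
    if lum < min_log_lum.exp2() {
        return 0; // anything darker than the minimum falls into bin 0
    }
    // log2 luminance mapped to [0, 1], then to bins 1..=63.
    let log_lum = ((lum.log2() - min_log_lum) * inv_log_lum_range).clamp(0.0, 1.0);
    (log_lum * 62.0 + 1.0) as u32
}

/// Metering-mask weight quantised to 16 levels: a u32 bin can therefore absorb
/// at most 2^32 / 16 = 16384 * 16384 fully weighted pixels before overflowing.
fn metering_weight(mask: f32) -> u32 {
    (mask * 16.0) as u32
}

fn main() {
    // Example settings: a 16-stop range starting at 2^-8.
    let (min_log_lum, range) = (-8.0_f32, 16.0_f32);
    let bin = color_to_bin([0.18, 0.18, 0.18], min_log_lum, 1.0 / range);
    println!("18% grey lands in bin {bin}, centre weight {}", metering_weight(1.0));
}
```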
+fn metering_weight(coords: vec2) -> u32 { + let pos = vec2(coords * vec2(textureDimensions(tex_mask))); + let mask = textureLoad(tex_mask, pos, 0).r; + return u32(mask * 16.0); +} + +@compute @workgroup_size(16, 16, 1) +fn compute_histogram( + @builtin(global_invocation_id) global_invocation_id: vec3, + @builtin(local_invocation_index) local_invocation_index: u32 +) { + // Clear the workgroup shared histogram + if local_invocation_index < 64 { + histogram_shared[local_invocation_index] = 0u; + } - let pixel_coordinates = vec2(global_id.xy); - let uv = (vec2(pixel_coordinates) + 0.5) / view.viewport.zw; + // Wait for all workgroup threads to clear the shared histogram + workgroupBarrier(); - var pixel_depth = calculate_neighboring_depth_differences(pixel_coordinates); - pixel_depth += 0.00001; // Avoid depth precision issues + let dim = vec2(textureDimensions(tex_color)); + let uv = vec2(global_invocation_id.xy) / vec2(dim); - let pixel_position = reconstruct_view_space_position(pixel_depth, uv); - let pixel_normal = load_normal_view_space(uv); - let view_vec = normalize(-pixel_position); + if global_invocation_id.x < dim.x && global_invocation_id.y < dim.y { + let col = textureLoad(tex_color, vec2(global_invocation_id.xy), 0).rgb; + let index = color_to_bin(col); + let weight = metering_weight(uv); - let noise = load_noise(pixel_coordinates); - let sample_scale = (-0.5 * effect_radius * view.projection[0][0]) / pixel_position.z; + // Increment the shared histogram bin by the weight obtained from the metering mask + atomicAdd(&histogram_shared[index], weight); + } - var visibility = 0.0; - for (var slice_t = 0.0; slice_t < slice_count; slice_t += 1.0) { - let slice = slice_t + noise.x; - let phi = (PI / slice_count) * slice; - let omega = vec2(cos(phi), sin(phi)); + // Wait for all workgroup threads to finish updating the workgroup histogram + workgroupBarrier(); - let direction = vec3(omega.xy, 0.0); - let orthographic_direction = direction - (dot(direction, view_vec) * view_vec); - let axis = cross(direction, view_vec); - let projected_normal = pixel_normal - axis * dot(pixel_normal, axis); - let projected_normal_length = length(projected_normal); + // Accumulate the workgroup histogram into the global histogram. + // Note that the global histogram was not cleared at the beginning, + // as it will be cleared in compute_average. + atomicAdd(&histogram[local_invocation_index], histogram_shared[local_invocation_index]); +} - let sign_norm = sign(dot(orthographic_direction, projected_normal)); - let cos_norm = saturate(dot(projected_normal, view_vec) / projected_normal_length); - let n = sign_norm * fast_acos(cos_norm); +@compute @workgroup_size(1, 1, 1) +fn compute_average(@builtin(local_invocation_index) local_index: u32) { + var histogram_sum = 0u; - let min_cos_horizon_1 = cos(n + HALF_PI); - let min_cos_horizon_2 = cos(n - HALF_PI); - var cos_horizon_1 = min_cos_horizon_1; - var cos_horizon_2 = min_cos_horizon_2; - let sample_mul = vec2(omega.x, -omega.y) * sample_scale; - for (var sample_t = 0.0; sample_t < samples_per_slice_side; sample_t += 1.0) { - var sample_noise = (slice_t + sample_t * samples_per_slice_side) * 0.6180339887498948482; - sample_noise = fract(noise.y + sample_noise); + // Calculate the cumulative histogram and clear the histogram bins. + // Each bin in the cumulative histogram contains the sum of all bins up to that point. + // This way we can quickly exclude the portion of lowest and highest samples as required by + // the low_percent and high_percent settings. 
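The trim-and-average step that this comment describes can be sketched on the CPU as follows (same 64-bin layout; all names are illustrative, not Bevy's API):

```rust
/// Average log-luminance from a 64-bin weighted histogram, excluding the
/// lowest and highest tails as defined by `low_percent` / `high_percent`.
fn average_log_luminance(
    histogram: &[u32; 64],
    low_percent: f32,
    high_percent: f32,
    min_log_lum: f32,
    log_lum_range: f32,
) -> f32 {
    // Cumulative histogram: bin i holds the sum of all bins up to i.
    let mut cumulative = [0u32; 64];
    let mut total = 0u32;
    for (i, &count) in histogram.iter().enumerate() {
        total += count;
        cumulative[i] = total;
    }

    // Samples below `first` or above `last` are excluded from the average.
    let first = (total as f32 * low_percent) as u32;
    let last = (total as f32 * high_percent) as u32;

    let mut count = 0u32;
    let mut sum = 0.0f32;
    for i in 1..64 {
        // Number of pixels that remain in bin i after trimming the tails.
        let bin_count = cumulative[i].clamp(first, last) - cumulative[i - 1].clamp(first, last);
        sum += bin_count as f32 * i as f32;
        count += bin_count;
    }

    if count == 0 {
        return min_log_lum;
    }
    // Map the average bin index (1..63) back into log-luminance space.
    sum / (count as f32 * 63.0) * log_lum_range + min_log_lum
}

fn main() {
    // A flat histogram of 64 equally weighted bins, trimming 10% at each end.
    let histogram = [100u32; 64];
    let avg = average_log_luminance(&histogram, 0.10, 0.90, -8.0, 16.0);
    println!("average log2 luminance ≈ {avg:.2}");
}
```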
+ for (var i=0u; i<64u; i+=1u) { + histogram_sum += histogram[i]; + histogram_shared[i] = histogram_sum; - var s = (sample_t + sample_noise) / samples_per_slice_side; - s *= s; // https://github.com/GameTechDev/XeGTAO#sample-distribution - let sample = s * sample_mul; + // Clear the histogram bin for the next frame + histogram[i] = 0u; + } - // * view.viewport.zw gets us from [0, 1] to [0, viewport_size], which is needed for this to get the correct mip levels - let sample_mip_level = clamp(log2(length(sample * view.viewport.zw)) - 3.3, 0.0, 5.0); // https://github.com/GameTechDev/XeGTAO#memory-bandwidth-bottleneck - let sample_position_1 = load_and_reconstruct_view_space_position(uv + sample, sample_mip_level); - let sample_position_2 = load_and_reconstruct_view_space_position(uv - sample, sample_mip_level); + let first_index = u32(f32(histogram_sum) * settings.low_percent); + let last_index = u32(f32(histogram_sum) * settings.high_percent); - let sample_difference_1 = sample_position_1 - pixel_position; - let sample_difference_2 = sample_position_2 - pixel_position; - let sample_distance_1 = length(sample_difference_1); - let sample_distance_2 = length(sample_difference_2); - var sample_cos_horizon_1 = dot(sample_difference_1 / sample_distance_1, view_vec); - var sample_cos_horizon_2 = dot(sample_difference_2 / sample_distance_2, view_vec); + var count = 0u; + var sum = 0.0; + for (var i=1u; i<64u; i+=1u) { + // The number of pixels in the bin. The histogram values are clamped to + // first_index and last_index to exclude the lowest and highest samples. + let bin_count = + clamp(histogram_shared[i], first_index, last_index) - + clamp(histogram_shared[i - 1u], first_index, last_index); + + sum += f32(bin_count) * f32(i); + count += bin_count; + } - let weight_1 = saturate(sample_distance_1 * falloff_mul + falloff_add); - let weight_2 = saturate(sample_distance_2 * falloff_mul + falloff_add); - sample_cos_horizon_1 = mix(min_cos_horizon_1, sample_cos_horizon_1, weight_1); - sample_cos_horizon_2 = mix(min_cos_horizon_2, sample_cos_horizon_2, weight_2); + var avg_lum = settings.min_log_lum; - cos_horizon_1 = max(cos_horizon_1, sample_cos_horizon_1); - cos_horizon_2 = max(cos_horizon_2, sample_cos_horizon_2); - } + if count > 0u { + // The average luminance of the included histogram samples. + avg_lum = sum / (f32(count) * 63.0) + * settings.log_lum_range + + settings.min_log_lum; + } - let horizon_1 = fast_acos(cos_horizon_1); - let horizon_2 = -fast_acos(cos_horizon_2); - let v1 = (cos_norm + 2.0 * horizon_1 * sin(n) - cos(2.0 * horizon_1 - n)) / 4.0; - let v2 = (cos_norm + 2.0 * horizon_2 * sin(n) - cos(2.0 * horizon_2 - n)) / 4.0; - visibility += projected_normal_length * (v1 + v2); + // The position in the compensation curve texture to sample for avg_lum. + let u = (avg_lum - compensation_curve.min_log_lum) * compensation_curve.inv_log_lum_range; + + // The target exposure is the negative of the average log luminance. + // The compensation value is added to the target exposure to adjust the exposure for + // artistic purposes. 
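The target-exposure smoothing that follows can likewise be sketched in Rust (parameter names mirror the AutoExposure settings; the helper itself is illustrative):

```rust
/// One frame of the exposure smoothing step: move `exposure` towards
/// `target_exposure`, capped per frame and eased exponentially near the target.
fn smooth_exposure(
    exposure: f32,
    target_exposure: f32,
    delta_time: f32,
    speed_up: f32,
    speed_down: f32,
    exponential_transition_distance: f32,
) -> f32 {
    let delta = target_exposure - exposure;
    if target_exposure > exposure {
        // Take at most `step` this frame; inside the transition distance the
        // proportional term takes over and the approach becomes exponential.
        let step = speed_down * delta_time;
        exposure + step.min(delta * step / exponential_transition_distance)
    } else {
        let step = speed_up * delta_time;
        exposure + (-step).max(delta * step / exponential_transition_distance)
    }
}

fn main() {
    let mut exposure = 0.0_f32;
    for _ in 0..10 {
        exposure = smooth_exposure(exposure, 2.0, 1.0 / 60.0, 2.0, 1.0, 0.5);
    }
    println!("exposure after 10 frames: {exposure:.3}");
}
```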
+ let target_exposure = textureLoad(tex_compensation, i32(saturate(u) * 255.0), 0).r + * compensation_curve.compensation_range + + compensation_curve.min_compensation + - avg_lum; + + // Smoothly adjust the `exposure` towards the `target_exposure` + let delta = target_exposure - exposure; + if target_exposure > exposure { + let speed_down = settings.speed_down * globals.delta_time; + let exp_down = speed_down / settings.exponential_transition_distance; + exposure = exposure + min(speed_down, delta * exp_down); + } else { + let speed_up = settings.speed_up * globals.delta_time; + let exp_up = speed_up / settings.exponential_transition_distance; + exposure = exposure + max(-speed_up, delta * exp_up); } - visibility /= slice_count; - visibility = clamp(visibility, 0.03, 1.0); - textureStore(ambient_occlusion, pixel_coordinates, vec4(visibility, 0.0, 0.0, 0.0)); + // Apply the exposure to the color grading settings, from where it will be used for the color + // grading pass. + view.color_grading.exposure += exposure; } ``` -### crates/bevy_pbr/src/ssao/gtao_utils +### src/shader_utils/common ```rust -#define_import_path bevy_pbr::gtao_utils +#define_import_path shadplay::shader_utils::common -#import bevy_pbr::utils::{PI, HALF_PI} +// The circle family +const PI:f32 = 3.14159265359; +const HALF_PI = 1.57079632679; +const NEG_HALF_PI = -1.57079632679; +const NEG_QUARTER_PI = -0.78539816339; +const QUARTER_PI = -0.78539816339; +const TAU:f32 = 6.28318530718; -// Approximates single-bounce ambient occlusion to multi-bounce ambient occlusion -// https://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf#page=78 -fn gtao_multibounce(visibility: f32, base_color: vec3) -> vec3 { - let a = 2.0404 * base_color - 0.3324; - let b = -4.7951 * base_color + 0.6417; - let c = 2.7552 * base_color + 0.6903; - let x = vec3(visibility); - return max(x, ((x * a + b) * x + c) * x); +// Euler's number / Napier's constant +const E: f32 = 2.71828182845; + +// Pythagoras' constants +const SQRT_OF_2:f32 = 1.41421356237; +const SQRT_OF_3:f32 = 1.73205080756; + +// The golden ratio +const PHI:f32 = 1.61803398874; + +/// Turn your `uv` coords into polar coords +fn intoPolar(uv: vec2)-> vec2{ + return vec2f(atan2(uv.x, uv.y), length(uv)); } -fn fast_sqrt(x: f32) -> f32 { - return bitcast(0x1fbd1df5 + (bitcast(x) >> 1u)); +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); } -fn fast_acos(in_x: f32) -> f32 { - let x = abs(in_x); - var res = -0.156583 * x + HALF_PI; - res *= fast_sqrt(1.0 - x); - return select(PI - res, res, in_x >= 0.0); +/// Move from the HueSaturationValue to RedGreenBlue +fn hsv2rgb(c: vec3f) -> vec3f { + var rgb: vec3f = clamp( + abs((c.x * 6.0 + vec3f(0.0, 4.0, 2.0)) % 6.0 - 3.0) - 1.0, + vec3f(0.0), + vec3f(1.0) + ); + return c.z * mix(vec3f(1.0), rgb, c.y); } -``` +// Signed distance field for a 2D circle +fn sd_circle(pt: vec2f, radius: f32) -> f32 { + return length(pt) - radius; +} -### crates/bevy_pbr/src/ssao/preprocess_depth +/// This is the default (and rather pretty) shader you start with in ShaderToy +fn shader_toy_default(t: f32, uv: vec2f) -> vec3f { + var col = vec3f(0.0); + let v = vec3(t) + vec3(uv.xyx) + vec3(0., 2., 4.); + return 0.5 + 0.5 * cos(v); +} -```rust -// Inputs a depth texture and outputs a MIP-chain of depths. 
-// -// Because SSAO's performance is bound by texture reads, this increases -// performance over using the full resolution depth for every sample. +fn dist_line(ray_origin: vec3f, ray_dir: vec3f, pt: vec3f) -> f32 { + return length(cross(pt - ray_origin, ray_dir)) / length(ray_dir); +} -// Reference: https://research.nvidia.com/sites/default/files/pubs/2012-06_Scalable-Ambient-Obscurance/McGuire12SAO.pdf, section 2.2 +fn sd_capsule(p: vec3f, a: vec3f, b: vec3f, r: f32) -> f32 { + let pa = p - a; + let ba = b - a; + let h = clamp(dot(pa, ba) / dot(ba, ba), 0., 1.); + return length(pa - ba * h) - r; +} -#import bevy_render::view::View +fn sd_capped_cylinder(p: vec3f, h: vec2f) -> f32 { + let d: vec2f = abs(vec2f(length(p.xz), p.y)) - h; + return min(max(d.x, d.y), 0.0) + length(max(d, vec2f(0.0))); +} -@group(0) @binding(0) var input_depth: texture_depth_2d; -@group(0) @binding(1) var preprocessed_depth_mip0: texture_storage_2d; -@group(0) @binding(2) var preprocessed_depth_mip1: texture_storage_2d; -@group(0) @binding(3) var preprocessed_depth_mip2: texture_storage_2d; -@group(0) @binding(4) var preprocessed_depth_mip3: texture_storage_2d; -@group(0) @binding(5) var preprocessed_depth_mip4: texture_storage_2d; -@group(1) @binding(0) var point_clamp_sampler: sampler; -@group(1) @binding(1) var view: View; +fn sd_torus(p: vec3f, t: vec2f) -> f32 { + let q: vec2f = vec2f(length(p.xz) - t.x, p.y); + return length(q) - t.y; +} +// License: MIT, author: Inigo Quilez, found: https://iquilezles.org/www/articles/distfunctions2d/distfunctions2d.htm +fn sd_hexagon(p: vec2f, r: f32) -> f32 { + let k = vec3f(-0.866025404, 0.5, 0.577350269); + var q: vec2f = abs(p); + q = q - 2. * min(dot(k.xy, q), 0.) * k.xy; + q = q - vec2f(clamp(q.x, -k.z * r, k.z * r), r); + return length(q) * sign(q.y); +} -// Using 4 depths from the previous MIP, compute a weighted average for the depth of the current MIP -fn weighted_average(depth0: f32, depth1: f32, depth2: f32, depth3: f32) -> f32 { - let depth_range_scale_factor = 0.75; - let effect_radius = depth_range_scale_factor * 0.5 * 1.457; - let falloff_range = 0.615 * effect_radius; - let falloff_from = effect_radius * (1.0 - 0.615); - let falloff_mul = -1.0 / falloff_range; - let falloff_add = falloff_from / falloff_range + 1.0; +/// Signed distance field for a Sphere (3d) +fn sd_sphere(p: vec3f, radius: f32) -> f32 { + return (length(p) - radius); +} - let min_depth = min(min(depth0, depth1), min(depth2, depth3)); - let weight0 = saturate((depth0 - min_depth) * falloff_mul + falloff_add); - let weight1 = saturate((depth1 - min_depth) * falloff_mul + falloff_add); - let weight2 = saturate((depth2 - min_depth) * falloff_mul + falloff_add); - let weight3 = saturate((depth3 - min_depth) * falloff_mul + falloff_add); - let weight_total = weight0 + weight1 + weight2 + weight3; +// Hexagonal tiling +fn hextile(_p: vec2f) -> vec2f { + // See Art of Code: Hexagonal Tiling Explained! 
+ // https://www.youtube.com/watch?v=VmrIDyYiJBA + var p = _p; - return ((weight0 * depth0) + (weight1 * depth1) + (weight2 * depth2) + (weight3 * depth3)) / weight_total; + // Define constants + let sz: vec2f = vec2f(1.0, sqrt(3.0)); + let hsz: vec2f = 0.5 * sz; + + // Calculate p1 and p2 + let p1: vec2f = (p % sz) - hsz; + let p2: vec2f = ((p - hsz) % sz) - hsz; + + // Choose p3 based on dot product + var p3: vec2f = vec2(0.); + if dot(p1, p1) < dot(p2, p2) { + p3 = p1; + } else { + p3 = p2; + } + + // Calculate n + var n: vec2f = ((p3 - p + hsz) / sz); + p = p3; + + // Adjust n and round for well-behaved hextile 0,0 + n -= vec2(0.5); + return round(n * 2.0) * 0.5; +} + +// From : https://www.shadertoy.com/view/tsBXW3 +fn hash(x: f32) -> f32 { + return (fract(sin(x) * 152754.742)); +} + +/// Signed distance field for a Bezier curve. +fn sd_bezier(p: vec2f, A: vec2f, B: vec2f, C: vec2f) -> vec2f { + let a = B - A; + let b = A - 2. * B + C; + let c = a * 2.; + let d = A - p; + let kk = 1. / dot(b, b); + let kx = kk * dot(a, b); + let ky = kk * (2. * dot(a, a) + dot(d, b)) / 3.; + let kz = kk * dot(d, a); + + let p1 = ky - kx * kx; + let p3 = p1 * p1 * p1; + let q = kx * (2.0 * kx * kx - 3.0 * ky) + kz; + var h: f32 = q * q + 4. * p3; + + var res: vec2f; + if h >= 0. { + h = sqrt(h); + let x = (vec2f(h, -h) - q) / 2.; + let uv = sign(x) * pow(abs(x), vec2f(1. / 3.)); + let t = clamp(uv.x + uv.y - kx, 0., 1.); + let f = d + (c + b * t) * t; + res = vec2f(dot(f, f), t); + } else { + let z = sqrt(-p1); + let v = acos(q / (p1 * z * 2.)) / 3.; + let m = cos(v); + let n = sin(v) * 1.732050808; + let t = clamp(vec2f(m + m, -n - m) * z - kx, vec2f(0.0), vec2f(1.0)); + let f = d + (c + b * t.x) * t.x; + var dis: f32 = dot(f, f); + res = vec2f(dis, t.x); + + let g = d + (c + b * t.y) * t.y; + dis = dot(g, g); + res = select(res, vec2f(dis, t.y), dis < res.x); + } + res.x = sqrt(res.x); + return res; } -// Used to share the depths from the previous MIP level between all invocations in a workgroup -var previous_mip_depth: array, 8>; -@compute -@workgroup_size(8, 8, 1) -fn preprocess_depth(@builtin(global_invocation_id) global_id: vec3, @builtin(local_invocation_id) local_id: vec3) { - let base_coordinates = vec2(global_id.xy); +/// coff +fn coff(h: f32, time: f32) -> vec2 { + let h0: f32 = h; + let h1: f32 = fract(h0 * 9677.0); + let h2: f32 = fract(h0 * 8677.0); + let t: f32 = mix(0.5, 1.0, h2 * h2) * time + 1234.5 * h0; + return mix(vec2(0.1, 0.1), vec2(0.2, 0.2), h1 * h1) * sin(t * vec2(1.0, sqrt(0.5))); +} - // MIP 0 - Copy 4 texels from the input depth (per invocation, 8x8 invocations per workgroup) - let pixel_coordinates0 = base_coordinates * 2i; - let pixel_coordinates1 = pixel_coordinates0 + vec2(1i, 0i); - let pixel_coordinates2 = pixel_coordinates0 + vec2(0i, 1i); - let pixel_coordinates3 = pixel_coordinates0 + vec2(1i, 1i); - let depths_uv = vec2(pixel_coordinates0) / view.viewport.zw; - let depths = textureGather(0, input_depth, point_clamp_sampler, depths_uv, vec2(1i, 1i)); - textureStore(preprocessed_depth_mip0, pixel_coordinates0, vec4(depths.w, 0.0, 0.0, 0.0)); - textureStore(preprocessed_depth_mip0, pixel_coordinates1, vec4(depths.z, 0.0, 0.0, 0.0)); - textureStore(preprocessed_depth_mip0, pixel_coordinates2, vec4(depths.x, 0.0, 0.0, 0.0)); - textureStore(preprocessed_depth_mip0, pixel_coordinates3, vec4(depths.y, 0.0, 0.0, 0.0)); +/// approx aces colour-space +fn aces_approx(_v: vec3) -> vec3 { + var v = max(_v, vec3(0.0, 0.0, 0.0)); + v *= 0.6; + let a: f32 = 2.51; + let b: f32 = 
0.03; + let c: f32 = 2.43; + let d: f32 = 0.59; + let e: f32 = 0.14; + return clamp((v * (a * v + b)) / (v * (c * v + d) + e), vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)); +} - // MIP 1 - Weighted average of MIP 0's depth values (per invocation, 8x8 invocations per workgroup) - let depth_mip1 = weighted_average(depths.w, depths.z, depths.x, depths.y); - textureStore(preprocessed_depth_mip1, base_coordinates, vec4(depth_mip1, 0.0, 0.0, 0.0)); - previous_mip_depth[local_id.x][local_id.y] = depth_mip1; +``` - workgroupBarrier(); +### screenshots/11-10-23/19-40-28/screeenshot - // MIP 2 - Weighted average of MIP 1's depth values (per invocation, 4x4 invocations per workgroup) - if all(local_id.xy % vec2(2u) == vec2(0u)) { - let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; - let depth1 = previous_mip_depth[local_id.x + 1u][local_id.y + 0u]; - let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 1u]; - let depth3 = previous_mip_depth[local_id.x + 1u][local_id.y + 1u]; - let depth_mip2 = weighted_average(depth0, depth1, depth2, depth3); - textureStore(preprocessed_depth_mip2, base_coordinates / 2i, vec4(depth_mip2, 0.0, 0.0, 0.0)); - previous_mip_depth[local_id.x][local_id.y] = depth_mip2; - } +```rust - workgroupBarrier(); +//! Showing how to use a texture, drag-n-drop for you own texture will be supported soon. - // MIP 3 - Weighted average of MIP 2's depth values (per invocation, 2x2 invocations per workgroup) - if all(local_id.xy % vec2(4u) == vec2(0u)) { - let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; - let depth1 = previous_mip_depth[local_id.x + 2u][local_id.y + 0u]; - let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 2u]; - let depth3 = previous_mip_depth[local_id.x + 2u][local_id.y + 2u]; - let depth_mip3 = weighted_average(depth0, depth1, depth2, depth3); - textureStore(preprocessed_depth_mip3, base_coordinates / 4i, vec4(depth_mip3, 0.0, 0.0, 0.0)); - previous_mip_depth[local_id.x][local_id.y] = depth_mip3; - } +#import bevy_pbr::mesh_vertex_output MeshVertexOutput - workgroupBarrier(); +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; - // MIP 4 - Weighted average of MIP 3's depth values (per invocation, 1 invocation per workgroup) - if all(local_id.xy % vec2(8u) == vec2(0u)) { - let depth0 = previous_mip_depth[local_id.x + 0u][local_id.y + 0u]; - let depth1 = previous_mip_depth[local_id.x + 4u][local_id.y + 0u]; - let depth2 = previous_mip_depth[local_id.x + 0u][local_id.y + 4u]; - let depth3 = previous_mip_depth[local_id.x + 4u][local_id.y + 4u]; - let depth_mip4 = weighted_average(depth0, depth1, depth2, depth3); - textureStore(preprocessed_depth_mip4, base_coordinates / 8i, vec4(depth_mip4, 0.0, 0.0, 0.0)); - } -} + + +@group(3) @binding(0) var mouse: vec2f; + + +@fragment +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + let texture_uvs = in.uv; + + let tex: vec4f = texture_sample(texture, texture_sampler, texture_uvs); + + return tex; +} + ``` -### crates/bevy_pbr/src/deferred/deferred_lighting +### screenshots/11-10-23/19-08-42/screeenshot ```rust -#import bevy_pbr::{ - prepass_utils, - pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT, - pbr_functions, - pbr_deferred_functions::pbr_input_from_deferred_gbuffer, - pbr_deferred_types::unpack_unorm3x4_plus_unorm_20_, - lighting, - mesh_view_bindings::deferred_prepass_texture, -} -#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION -#import bevy_pbr::mesh_view_bindings::screen_space_ambient_occlusion_texture -#import 
bevy_pbr::gtao_utils::gtao_multibounce -#endif +//! Showing how to use a texture, drag-n-drop for you own texture will be supported soon. -struct FullscreenVertexOutput { - @builtin(position) - position: vec4, - @location(0) - uv: vec2, -}; +#import bevy_pbr::mesh_vertex_output MeshVertexOutput -struct PbrDeferredLightingDepthId { - depth_id: u32, // limited to u8 -#ifdef SIXTEEN_BYTE_ALIGNMENT - // WebGL2 structs must be 16 byte aligned. - _webgl2_padding_0: f32, - _webgl2_padding_1: f32, - _webgl2_padding_2: f32, -#endif -} -@group(1) @binding(0) -var depth_id: PbrDeferredLightingDepthId; +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; -@vertex -fn vertex(@builtin(vertex_index) vertex_index: u32) -> FullscreenVertexOutput { - // See the full screen vertex shader for explanation above for how this works. - let uv = vec2(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0; - // Depth is stored as unorm, so we are dividing the u8 depth_id by 255.0 here. - let clip_position = vec4(uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0), f32(depth_id.depth_id) / 255.0, 1.0); - return FullscreenVertexOutput(clip_position, uv); -} +@group(1) @binding(0) var mouse: vec2; + + +@fragment +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + let texture_uvs = in.uv; + + let tex: vec4f = texture_sample(texture, texture_sampler, texture_uvs); + + return tex; +} + + +``` + +### screenshots/08-11-23/22-29-33/screenshot -@fragment -fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { - var frag_coord = vec4(in.position.xy, 0.0, 0.0); +```rust +//! +//! The default 3d Shader. +//! +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D - let deferred_data = textureLoad(deferred_prepass_texture, vec2(frag_coord.xy), 0); +#import bevy_render::view View +@group(0) @binding(0) var view: View; -#ifdef WEBGL2 - frag_coord.z = unpack_unorm3x4_plus_unorm_20_(deferred_data.b).w; -#else -#ifdef DEPTH_PREPASS - frag_coord.z = prepass_utils::prepass_depth(in.position, 0u); -#endif -#endif +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; - var pbr_input = pbr_input_from_deferred_gbuffer(frag_coord, deferred_data); - var output_color = vec4(0.0); +const SPEED:f32 = 1.0; - // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit - if ((pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; -#ifdef SCREEN_SPACE_AMBIENT_OCCLUSION - let ssao = textureLoad(screen_space_ambient_occlusion_texture, vec2(in.position.xy), 0i).r; - let ssao_multibounce = gtao_multibounce(ssao, pbr_input.material.base_color.rgb); - pbr_input.diffuse_occlusion = min(pbr_input.diffuse_occlusion, ssao_multibounce); + let tex: vec4f = textureSample(texture, texture_sampler, texture_uvs); + return tex; +} +``` - // Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886" - let NdotV = max(dot(pbr_input.N, pbr_input.V), 0.0001); - var perceptual_roughness: f32 = pbr_input.material.perceptual_roughness; - let roughness = lighting::perceptualRoughnessToRoughness(perceptual_roughness); - // Use SSAO to estimate the specular occlusion. 
- // Lagarde and Rousiers 2014, "Moving Frostbite to Physically Based Rendering" - pbr_input.specular_occlusion = saturate(pow(NdotV + ssao, exp2(-16.0 * roughness - 1.0)) - 1.0 + ssao); -#endif // SCREEN_SPACE_AMBIENT_OCCLUSION +### screenshots/08-11-23/22-29-35/screenshot - output_color = pbr_functions::apply_pbr_lighting(pbr_input); - } else { - output_color = pbr_input.material.base_color; - } +```rust +//! +//! The default 3d Shader. +//! +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D - output_color = pbr_functions::main_pass_post_lighting_processing(pbr_input, output_color); +#import bevy_render::view View +@group(0) @binding(0) var view: View; - return output_color; -} +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; + let tex: vec4f = textureSample(texture, texture_sampler, texture_uvs); + return tex; +} ``` -### crates/bevy_pbr/src/deferred/pbr_deferred_functions +### screenshots/08-11-23/22-29-32/screenshot ```rust -#define_import_path bevy_pbr::pbr_deferred_functions +//! +//! The default 3d Shader. +//! +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D -#import bevy_pbr::{ - pbr_types::{PbrInput, pbr_input_new, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, - pbr_deferred_types as deferred_types, - pbr_functions, - rgb9e5, - mesh_view_bindings::view, - utils::{octahedral_encode, octahedral_decode}, - prepass_io::FragmentOutput, - view_transformations::{position_ndc_to_world, frag_coord_to_ndc}, -} +#import bevy_render::view View +@group(0) @binding(0) var view: View; -#ifdef MESHLET_MESH_MATERIAL_PASS -#import bevy_pbr::meshlet_visibility_buffer_resolve::VertexOutput -#else -#import bevy_pbr::prepass_io::VertexOutput -#endif +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; -#ifdef MOTION_VECTOR_PREPASS - #import bevy_pbr::pbr_prepass_functions::calculate_motion_vector -#endif +const SPEED:f32 = 1.0; -// Creates the deferred gbuffer from a PbrInput. -fn deferred_gbuffer_from_pbr_input(in: PbrInput) -> vec4 { - // Only monochrome occlusion supported. May not be worth including at all. - // Some models have baked occlusion, GLTF only supports monochrome. - // Real time occlusion is applied in the deferred lighting pass. - // Deriving luminance via Rec. 709. coefficients - // https://en.wikipedia.org/wiki/Rec._709 - let diffuse_occlusion = dot(in.diffuse_occlusion, vec3(0.2126, 0.7152, 0.0722)); -#ifdef WEBGL2 // More crunched for webgl so we can also fit depth. - var props = deferred_types::pack_unorm3x4_plus_unorm_20_(vec4( - in.material.reflectance, - in.material.metallic, - diffuse_occlusion, - in.frag_coord.z)); -#else - var props = deferred_types::pack_unorm4x8_(vec4( - in.material.reflectance, // could be fewer bits - in.material.metallic, // could be fewer bits - diffuse_occlusion, // is this worth including? 
- 0.0)); // spare -#endif // WEBGL2 - let flags = deferred_types::deferred_flags_from_mesh_material_flags(in.flags, in.material.flags); - let octahedral_normal = octahedral_encode(normalize(in.N)); - var base_color_srgb = vec3(0.0); - var emissive = in.material.emissive.rgb; - if ((in.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { - // Material is unlit, use emissive component of gbuffer for color data. - // Unlit materials are effectively emissive. - emissive = in.material.base_color.rgb; - } else { - base_color_srgb = pow(in.material.base_color.rgb, vec3(1.0 / 2.2)); - } - let deferred = vec4( - deferred_types::pack_unorm4x8_(vec4(base_color_srgb, in.material.perceptual_roughness)), - rgb9e5::vec3_to_rgb9e5_(emissive), - props, - deferred_types::pack_24bit_normal_and_flags(octahedral_normal, flags), - ); - return deferred; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; + + let tex: vec4f = textureSample(texture, texture_sampler, texture_uvs); + return tex; } +``` -// Creates a PbrInput from the deferred gbuffer. -fn pbr_input_from_deferred_gbuffer(frag_coord: vec4, gbuffer: vec4) -> PbrInput { - var pbr = pbr_input_new(); +### screenshots/14-10-23/12-28-54/screenshot - let flags = deferred_types::unpack_flags(gbuffer.a); - let deferred_flags = deferred_types::mesh_material_flags_from_deferred_flags(flags); - pbr.flags = deferred_flags.x; - pbr.material.flags = deferred_flags.y; +```rust +/// +/// This is a port of CyberAnimArrowX by float1987 +/// Source: https://www.shadertoy.com/view/DsjfDt +/// Authour: https://www.shadertoy.com/user/float1987 +/// +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common rotate2D, QUARTER_PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; - let base_rough = deferred_types::unpack_unorm4x8_(gbuffer.r); - pbr.material.perceptual_roughness = base_rough.a; - let emissive = rgb9e5::rgb9e5_to_vec3_(gbuffer.g); - if ((pbr.material.flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) { - pbr.material.base_color = vec4(emissive, 1.0); - pbr.material.emissive = vec4(vec3(0.0), 1.0); - } else { - pbr.material.base_color = vec4(pow(base_rough.rgb, vec3(2.2)), 1.0); - pbr.material.emissive = vec4(emissive, 1.0); - } -#ifdef WEBGL2 // More crunched for webgl so we can also fit depth. - let props = deferred_types::unpack_unorm3x4_plus_unorm_20_(gbuffer.b); - // Bias to 0.5 since that's the value for almost all materials. - pbr.material.reflectance = saturate(props.r - 0.03333333333); -#else - let props = deferred_types::unpack_unorm4x8_(gbuffer.b); - pbr.material.reflectance = props.r; -#endif // WEBGL2 - pbr.material.metallic = props.g; - pbr.diffuse_occlusion = vec3(props.b); - let octahedral_normal = deferred_types::unpack_24bit_normal(gbuffer.a); - let N = octahedral_decode(octahedral_normal); +const SPEED:f32 = 0.2; //Global Speed multiplier +const NUM_ARROWS = 3.0; // Number of arrows spawned (see the for-loop below) +const COLOUR_TEMP = 0.02; // The 'intensity' of the red channel in the arrows. 
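The gbuffer code above stores base colour with a gamma-2.2 encode before 8-bit quantisation and decodes with the inverse power on read; a minimal Rust sketch of that round-trip (helper names are mine):

```rust
/// Encode a linear colour channel with gamma 2.2 and quantise to 8 bits,
/// mirroring how the deferred gbuffer listing stores base colour.
fn encode_srgbish_u8(linear: f32) -> u8 {
    (linear.clamp(0.0, 1.0).powf(1.0 / 2.2) * 255.0 + 0.5) as u8
}

/// Decode back to (approximately) linear space.
fn decode_srgbish_u8(encoded: u8) -> f32 {
    (encoded as f32 / 255.0).powf(2.2)
}

fn main() {
    // Dark values benefit most from the gamma curve: compare against a plain
    // linear 8-bit quantisation of the same value.
    let linear = 0.01_f32;
    let round_trip = decode_srgbish_u8(encode_srgbish_u8(linear));
    let linear_8bit = ((linear * 255.0 + 0.5) as u8) as f32 / 255.0;
    println!("gamma round-trip: {round_trip:.5}, linear 8-bit: {linear_8bit:.5}");
}
```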
- let world_position = vec4(position_ndc_to_world(frag_coord_to_ndc(frag_coord)), 1.0); - let is_orthographic = view.projection[3].w == 1.0; - let V = pbr_functions::calculate_view(world_position, is_orthographic); +@fragment +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; // | + let resolution = view.viewport.zw; // | + uv.x *= resolution.x / resolution.y; // normalising uvs. - pbr.frag_coord = frag_coord; - pbr.world_normal = N; - pbr.world_position = world_position; - pbr.N = N; - pbr.V = V; - pbr.is_orthographic = is_orthographic; + let t = globals.time * SPEED; - return pbr; -} + uv *= rotate2D(t); // Play with the time to adjust the speed at which the arrows rotate, or commen out to prevent spin entirely. + // what happens is you put the negative -QUARTER_PI in here? -#ifdef PREPASS_PIPELINE -fn deferred_output(in: VertexOutput, pbr_input: PbrInput) -> FragmentOutput { - var out: FragmentOutput; + return cyber_anim_arror_x(uv, t); +} + +fn cyber_anim_arror_x(uv: vec2f, t: f32) -> vec4f { + var out = vec3f(0.0); - // gbuffer - out.deferred = deferred_gbuffer_from_pbr_input(pbr_input); - // lighting pass id (used to determine which lighting shader to run for the fragment) - out.deferred_lighting_pass_id = pbr_input.material.deferred_lighting_pass_id; - // normal if required -#ifdef NORMAL_PREPASS - out.normal = vec4(in.world_normal * 0.5 + vec3(0.5), 1.0); -#endif - // motion vectors if required -#ifdef MOTION_VECTOR_PREPASS -#ifdef MESHLET_MESH_MATERIAL_PASS - out.motion_vector = in.motion_vector; -#else - out.motion_vector = calculate_motion_vector(in.world_position, in.previous_world_position); -#endif -#endif + for (var i: f32 = 0.0; i < NUM_ARROWS; i += 1.0) { + // HOMEWORK IDEA 1: + // there's several colour pallete creators in this codebase, kishimisu, shaderToyDefault etc (grep for them), + // maybe you can add your own colour and multiply the sdf_arrow by that? + out += draw_arrow(uv, i) * vec3f(COLOUR_TEMP, abs(cos(t)), abs(sin(t) * cos(t))); - return out; + // HOMEWORK IDEA 2: + // the dradraw_arrow() function is really just an sdf shape, maybe you can swap it out with some others https://gist.github.com/munrocket/30e645d584b5300ee69295e54674b3e4#bobbly-cross---exact + } + + return vec4f(out, 1.0); } -#endif -``` +/// Draws an sdf_arrow, by manipulating a square +fn draw_arrow(uv: vec2f, offset: f32) -> f32 { + var uv = uv; + var sign_x = sign(uv.x); -### crates/bevy_pbr/src/deferred/pbr_deferred_types + uv.y = abs(uv.y); + uv.x += sign_x * (uv.y - fract(globals.time) + offset); // Comment this out and you get a square. -```rust -#define_import_path bevy_pbr::pbr_deferred_types + var a = QUARTER_PI;// There are more constants in the common.wgsl -- try some others! + uv *= rotate2D(a); // rotating our uvs by angle 'a', naming your 'angles' a1, a2, a3 etc seems to be very common. 
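The `(uv * 2.0) - 1.0`, aspect-correction, `rotate2D` sequence used here (and in most shaders in this dump) is an affine remap plus a 2x2 rotation; a plain-Rust sketch, assuming the same clockwise convention as `rotate2D` in common.wgsl:

```rust
/// Map [0,1]^2 UVs to [-1,1]^2, correct for aspect ratio, then rotate,
/// mirroring the fragment-shader preamble used throughout this dump.
fn normalise_and_rotate(uv: (f32, f32), resolution: (f32, f32), theta: f32) -> (f32, f32) {
    let mut x = uv.0 * 2.0 - 1.0;
    let y = uv.1 * 2.0 - 1.0;
    x *= resolution.0 / resolution.1;

    // Matches `uv *= rotate2D(theta)`: rotate2D builds mat2x2(c, s, -s, c) and
    // WGSL's `uv * m` treats uv as a row vector, giving a clockwise rotation.
    let (s, c) = theta.sin_cos();
    (c * x + s * y, c * y - s * x)
}

fn main() {
    let p = normalise_and_rotate((0.75, 0.5), (1920.0, 1080.0), std::f32::consts::FRAC_PI_4);
    println!("rotated point: ({:.3}, {:.3})", p.0, p.1);
}
```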
-#import bevy_pbr::{ - mesh_types::MESH_FLAGS_SHADOW_RECEIVER_BIT, - pbr_types::{STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT, STANDARD_MATERIAL_FLAGS_UNLIT_BIT}, + var t1 = smoothstep(0.3, 0.29, abs(uv.x) + abs(uv.y)); + var t2 = smoothstep(0.29, 0.28, abs(uv.x) + abs(uv.y)); + var t = step(0.1, t1 - t2); + + return t; } -// Maximum of 8 bits available -const DEFERRED_FLAGS_UNLIT_BIT: u32 = 1u; -const DEFERRED_FLAGS_FOG_ENABLED_BIT: u32 = 2u; -const DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT: u32 = 4u; -fn deferred_flags_from_mesh_material_flags(mesh_flags: u32, mat_flags: u32) -> u32 { - var flags = 0u; - flags |= u32((mesh_flags & MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) * DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT; - flags |= u32((mat_flags & STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) * DEFERRED_FLAGS_FOG_ENABLED_BIT; - flags |= u32((mat_flags & STANDARD_MATERIAL_FLAGS_UNLIT_BIT) != 0u) * DEFERRED_FLAGS_UNLIT_BIT; - return flags; -} +``` -fn mesh_material_flags_from_deferred_flags(deferred_flags: u32) -> vec2 { - var mat_flags = 0u; - var mesh_flags = 0u; - mesh_flags |= u32((deferred_flags & DEFERRED_MESH_FLAGS_SHADOW_RECEIVER_BIT) != 0u) * MESH_FLAGS_SHADOW_RECEIVER_BIT; - mat_flags |= u32((deferred_flags & DEFERRED_FLAGS_FOG_ENABLED_BIT) != 0u) * STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT; - mat_flags |= u32((deferred_flags & DEFERRED_FLAGS_UNLIT_BIT) != 0u) * STANDARD_MATERIAL_FLAGS_UNLIT_BIT; - return vec2(mesh_flags, mat_flags); -} +### screenshots/14-10-23/12-28-55/screenshot -const U12MAXF = 4095.0; -const U16MAXF = 65535.0; -const U20MAXF = 1048575.0; +```rust +/// +/// This is a port of CyberAnimArrowX by float1987 +/// Source: https://www.shadertoy.com/view/DsjfDt +/// Authour: https://www.shadertoy.com/user/float1987 +/// +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common rotate2D, QUARTER_PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; -// Storing normals as oct24. -// Flags are stored in the remaining 8 bits. -// https://jcgt.org/published/0003/02/01/paper.pdf -// Could possibly go down to oct20 if the space is needed. +const SPEED:f32 = 0.2; //Global Speed multiplier +const NUM_ARROWS = 3.0; // Number of arrows spawned (see the for-loop below) +const COLOUR_TEMP = 0.02; // The 'intensity' of the red channel in the arrows. -fn pack_24bit_normal_and_flags(octahedral_normal: vec2, flags: u32) -> u32 { - let unorm1 = u32(saturate(octahedral_normal.x) * U12MAXF + 0.5); - let unorm2 = u32(saturate(octahedral_normal.y) * U12MAXF + 0.5); - return (unorm1 & 0xFFFu) | ((unorm2 & 0xFFFu) << 12u) | ((flags & 0xFFu) << 24u); -} +@fragment +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + var uv = (in.uv * 2.0) - 1.0; // | + let resolution = view.viewport.zw; // | + uv.x *= resolution.x / resolution.y; // normalising uvs. -fn unpack_24bit_normal(packed: u32) -> vec2 { - let unorm1 = packed & 0xFFFu; - let unorm2 = (packed >> 12u) & 0xFFFu; - return vec2(f32(unorm1) / U12MAXF, f32(unorm2) / U12MAXF); -} + let t = globals.time * SPEED; -fn unpack_flags(packed: u32) -> u32 { - return (packed >> 24u) & 0xFFu; -} + uv *= rotate2D(t); // Play with the time to adjust the speed at which the arrows rotate, or commen out to prevent spin entirely. + // what happens is you put the negative -QUARTER_PI in here? -// The builtin one didn't work in webgl. 
-// "'unpackUnorm4x8' : no matching overloaded function found" -// https://github.com/gfx-rs/naga/issues/2006 -fn unpack_unorm4x8_(v: u32) -> vec4 { - return vec4( - f32(v & 0xFFu), - f32((v >> 8u) & 0xFFu), - f32((v >> 16u) & 0xFFu), - f32((v >> 24u) & 0xFFu) - ) / 255.0; -} + return cyber_anim_arror_x(uv, t); +} + +fn cyber_anim_arror_x(uv: vec2f, t: f32) -> vec4f { + var out = vec3f(0.0); -// 'packUnorm4x8' : no matching overloaded function found -// https://github.com/gfx-rs/naga/issues/2006 -fn pack_unorm4x8_(values: vec4) -> u32 { - let v = vec4(saturate(values) * 255.0 + 0.5); - return (v.w << 24u) | (v.z << 16u) | (v.y << 8u) | v.x; -} + for (var i: f32 = 0.0; i < NUM_ARROWS; i += 1.0) { + // HOMEWORK IDEA 1: + // there's several colour pallete creators in this codebase, kishimisu, shaderToyDefault etc (grep for them), + // maybe you can add your own colour and multiply the sdf_arrow by that? + out += draw_arrow(uv, i) * vec3f(COLOUR_TEMP, abs(cos(t)), abs(sin(t) * cos(t))); -// Pack 3x 4bit unorm + 1x 20bit -fn pack_unorm3x4_plus_unorm_20_(v: vec4) -> u32 { - let sm = vec3(saturate(v.xyz) * 15.0 + 0.5); - let bg = u32(saturate(v.w) * U20MAXF + 0.5); - return (bg << 12u) | (sm.z << 8u) | (sm.y << 4u) | sm.x; + // HOMEWORK IDEA 2: + // the dradraw_arrow() function is really just an sdf shape, maybe you can swap it out with some others https://gist.github.com/munrocket/30e645d584b5300ee69295e54674b3e4#bobbly-cross---exact + } + + return vec4f(out, 1.0); } -// Unpack 3x 4bit unorm + 1x 20bit -fn unpack_unorm3x4_plus_unorm_20_(v: u32) -> vec4 { - return vec4( - f32(v & 0xfu) / 15.0, - f32((v >> 4u) & 0xFu) / 15.0, - f32((v >> 8u) & 0xFu) / 15.0, - f32((v >> 12u) & 0xFFFFFFu) / U20MAXF, - ); +/// Draws an sdf_arrow, by manipulating a square +fn draw_arrow(uv: vec2f, offset: f32) -> f32 { + var uv = uv; + var sign_x = sign(uv.x); + + uv.y = abs(uv.y); + uv.x += sign_x * (uv.y - fract(globals.time) + offset); // Comment this out and you get a square. + + var a = QUARTER_PI;// There are more constants in the common.wgsl -- try some others! + uv *= rotate2D(a); // rotating our uvs by angle 'a', naming your 'angles' a1, a2, a3 etc seems to be very common. 
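The packing helpers above all follow the same pattern: scale a [0,1] value into an integer range, round, and shift it into place. A Rust sketch of the 4x8-bit unorm and the 12+12+8-bit normal/flags layouts (constants mirror U12MAXF; the function names are mine):

```rust
/// 4 x 8-bit unorm packing, as in pack_unorm4x8_.
fn pack_unorm4x8(v: [f32; 4]) -> u32 {
    let q = |x: f32| (x.clamp(0.0, 1.0) * 255.0 + 0.5) as u32;
    (q(v[3]) << 24) | (q(v[2]) << 16) | (q(v[1]) << 8) | q(v[0])
}

fn unpack_unorm4x8(p: u32) -> [f32; 4] {
    [
        (p & 0xFF) as f32 / 255.0,
        ((p >> 8) & 0xFF) as f32 / 255.0,
        ((p >> 16) & 0xFF) as f32 / 255.0,
        ((p >> 24) & 0xFF) as f32 / 255.0,
    ]
}

/// 12 + 12 bit octahedral normal with 8 bits of flags in the top byte,
/// mirroring pack_24bit_normal_and_flags / unpack_24bit_normal.
const U12MAXF: f32 = 4095.0;

fn pack_normal_and_flags(normal: [f32; 2], flags: u32) -> u32 {
    let a = (normal[0].clamp(0.0, 1.0) * U12MAXF + 0.5) as u32;
    let b = (normal[1].clamp(0.0, 1.0) * U12MAXF + 0.5) as u32;
    (a & 0xFFF) | ((b & 0xFFF) << 12) | ((flags & 0xFF) << 24)
}

fn unpack_normal_and_flags(p: u32) -> ([f32; 2], u32) {
    (
        [(p & 0xFFF) as f32 / U12MAXF, ((p >> 12) & 0xFFF) as f32 / U12MAXF],
        (p >> 24) & 0xFF,
    )
}

fn main() {
    let packed = pack_normal_and_flags([0.25, 0.75], 0b101);
    let (n, flags) = unpack_normal_and_flags(packed);
    println!("normal ≈ ({:.4}, {:.4}), flags = {flags:#b}", n[0], n[1]);
    println!("colour round-trip: {:?}", unpack_unorm4x8(pack_unorm4x8([0.1, 0.2, 0.3, 1.0])));
}
```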
+ + var t1 = smoothstep(0.3, 0.29, abs(uv.x) + abs(uv.y)); + var t2 = smoothstep(0.29, 0.28, abs(uv.x) + abs(uv.y)); + var t = step(0.1, t1 - t2); + + return t; } + ``` -### assets/shaders/line_material +### screenshots/27-01-24/21-40-42/screenshot ```rust -#import bevy_pbr::forward_io::VertexOutput +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -struct LineMaterial { - color: vec4, -}; +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -@group(2) @binding(0) var material: LineMaterial; +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; @fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { - return material.color; -} +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### assets/shaders/instancing +### screenshots/17-11-23/06-01-11/screenshot ```rust -#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip} - -struct Vertex { - @location(0) position: vec3, - @location(1) normal: vec3, - @location(2) uv: vec2, +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// - @location(3) i_pos_scale: vec4, - @location(4) i_color: vec4, -}; +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -struct VertexOutput { - @builtin(position) clip_position: vec4, - @location(0) color: vec4, -}; +@group(0) @binding(0) var view: View; -@vertex -fn vertex(vertex: Vertex) -> VertexOutput { - let position = vertex.position * vertex.i_pos_scale.w + vertex.i_pos_scale.xyz; - var out: VertexOutput; - // NOTE: Passing 0 as the instance_index to get_model_matrix() is a hack - // for this example as the instance_index builtin would map to the wrong - // index in the Mesh array. This index could be passed in via another - // uniform instead but it's unnecessary for the example. - out.clip_position = mesh_position_local_to_clip( - get_model_matrix(0u), - vec4(position, 1.0) - ); - out.color = vertex.i_color; - return out; -} +const SPEED:f32 = 1.0; @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { - return in.color; -} + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### assets/shaders/gpu_readback +### screenshots/17-11-23/16-49-11/screenshot ```rust -// This shader is used for the gpu_readback example -// The actual work it does is not important for the example +//! +//! The default 3d Shader. +//! 
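The band test in `draw_arrow` above uses the L1 (diamond) distance `abs(x) + abs(y)` and two nested smoothsteps to keep only a thin outline; a CPU sketch of just that test (the shear applied to `uv.x` beforehand is what turns the diamond into an arrow):

```rust
/// GLSL/WGSL-style smoothstep, written out so the reversed edges below behave
/// the same way they do in the shader (1 inside the edge, 0 outside).
fn smoothstep(e0: f32, e1: f32, x: f32) -> f32 {
    let t = ((x - e0) / (e1 - e0)).clamp(0.0, 1.0);
    t * t * (3.0 - 2.0 * t)
}

/// 1.0 only inside the thin diamond outline between L1 radii ~0.28 and ~0.30.
fn diamond_band(x: f32, y: f32) -> f32 {
    let d = x.abs() + y.abs();
    let outer = smoothstep(0.3, 0.29, d);
    let inner = smoothstep(0.29, 0.28, d);
    if outer - inner > 0.1 { 1.0 } else { 0.0 } // step(0.1, t1 - t2)
}

fn main() {
    // On the band, inside it, and well outside it.
    for d in [0.295_f32, 0.2, 0.6] {
        println!("|x| + |y| = {d:.3} -> {}", diamond_band(d, 0.0));
    }
}
```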
+#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::utils PI +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D -// This is the data that lives in the gpu only buffer -@group(0) @binding(0) var data: array; +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; + +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; + + let tex: vec4f = textureSample(texture, texture_sampler, texture_uvs); + if texture_uvs.x > 0.2 { + return vec4f(tex.xyz, 0.003); + + }else{ + return vec4f(0.0); + } -@compute @workgroup_size(1) -fn main(@builtin(global_invocation_id) global_id: vec3) { - // We use the global_id to index the array to make sure we don't - // access data used in another workgroup - data[global_id.x] += 1u; } + +fn circle(p: vec2, r: f32) -> f32 { + return smoothstep(0.1, 0., abs(length(p) - r)); +} + ``` -### assets/shaders/shader_defs +### screenshots/22-10-23/19-39-14/screenshot ```rust -#import bevy_pbr::forward_io::VertexOutput +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -struct CustomMaterial { - color: vec4, -}; +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, rotate2D -@group(2) @binding(0) var material: CustomMaterial; +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 3.0; +const NUM_ITERATIONS: f32 = 24.0; +const LINE_THICKNESS:f32 = 0.28; @fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { -#ifdef IS_RED - return vec4(1.0, 0.0, 0.0, 1.0); -#else - return material.color; -#endif +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let time = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + let col = semi_circle_wave(uv, resolution, time); + return col; +} + + +// @param in - The input mesh vertex output +// @return The computed color for the fragment +fn semi_circle_wave(uv: vec2f, resolution: vec2f, time: f32) -> vec4 { + var angle: f32 = atan2(uv.x, uv.y) + 1.57; + let length_normalized: f32 = length(uv) * NUM_ITERATIONS; + let rounded_length: f32 = min(ceil(length_normalized), NUM_ITERATIONS); + + angle *= rounded_length; + angle = clamp(angle, 0.1, rounded_length * 0.57 * (sin(time + rounded_length / NUM_ITERATIONS * 4.7))); + + var color_intensity: f32 = (LINE_THICKNESS - length(vec2(angle - angle, length_normalized - rounded_length + 0.5))) * resolution.y / 100.0; + color_intensity = min(color_intensity, 0.9); + + let color: vec4 = sqrt(cos(length_normalized / NUM_ITERATIONS * 5.0 + vec4(2.0, 4.0, 6.0, 0.0)) + 1.0) * vec4(color_intensity); + + return color; } ``` -### assets/shaders/custom_material_import +### screenshots/21-01-24/18-50-41/screenshot ```rust -// this is made available to the importing module -const COLOR_MULTIPLIER: vec4 = vec4(1.0, 1.0, 1.0, 0.5); +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// 
+/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### assets/shaders/extended_material +### screenshots/02-11-23/07-48-05/screenshot ```rust -#import bevy_pbr::{ - pbr_fragment::pbr_input_from_standard_material, - pbr_functions::alpha_discard, -} +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -#ifdef PREPASS_PIPELINE -#import bevy_pbr::{ - prepass_io::{VertexOutput, FragmentOutput}, - pbr_deferred_functions::deferred_output, -} -#else -#import bevy_pbr::{ - forward_io::{VertexOutput, FragmentOutput}, - pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, -} -#endif +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D -struct MyExtendedMaterial { - quantize_steps: u32, -} +#import bevy_render::view View +@group(0) @binding(0) var view: View; -@group(2) @binding(100) -var my_extended_material: MyExtendedMaterial; +const SPEED:f32 = 1.0; @fragment -fn fragment( - in: VertexOutput, - @builtin(front_facing) is_front: bool, -) -> FragmentOutput { - // generate a PbrInput struct from the StandardMaterial bindings - var pbr_input = pbr_input_from_standard_material(in, is_front); +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); - // we can optionally modify the input before lighting and alpha_discard is applied - pbr_input.material.base_color.b = pbr_input.material.base_color.r; + return vec4f(shader_toy_default(t, uv), 1.0); +} + - // alpha discard - pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); +``` -#ifdef PREPASS_PIPELINE - // in deferred mode we can't modify anything after that, as lighting is run in a separate fullscreen shader. 
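The `quantize_steps` post-step in the extended_material listing nearby boils down to a scale / truncate / rescale; a one-channel Rust sketch (illustrative only):

```rust
/// Posterise a colour channel into `steps` bands: scale up, truncate to an
/// integer, and divide back down, as the quantize_steps example does per component.
fn quantize(channel: f32, steps: u32) -> f32 {
    ((channel * steps as f32) as u32) as f32 / steps as f32
}

fn main() {
    for c in [0.05_f32, 0.33, 0.34, 0.99] {
        println!("{c:.2} -> {:.2}", quantize(c, 3));
    }
}
```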
- let out = deferred_output(in, pbr_input); -#else - var out: FragmentOutput; - // apply lighting - out.color = apply_pbr_lighting(pbr_input); +### screenshots/16-11-23/21-23-45/screenshot - // we can optionally modify the lit color before post-processing is applied - out.color = vec4(vec4(out.color * f32(my_extended_material.quantize_steps))) / f32(my_extended_material.quantize_steps); +```rust +/// ***************************** /// +/// This is a port of 'Perlin Waves' by zilian: https://www.shadertoy.com/view/DlVcRW /// +/// ***************************** /// - // apply in-shader post processing (fog, alpha-premultiply, and also tonemapping, debanding if the camera is non-hdr) - // note this does not include fullscreen postprocessing effects like bloom. - out.color = main_pass_post_lighting_processing(pbr_input, out.color); +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; - // we can optionally modify the final result here - out.color = out.color * 2.0; -#endif +@group(0) @binding(0) var view: View; + + +const TEMPERATURE: f32 = 5.; +const NOISESCALE: f32 = 0.2; +const EFFECTWIDTH: f32 = 1.; +const LINETHICKNESS: f32 = 0.008; +const SPEED: f32 = 0.2; + + +/// This is a port of 'Perlin Waves' by zilian: https://www.shadertoy.com/view/DlVcRW /// +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let resolution = view.viewport.zw; + var uv = in.uv; + uv.y *= 0.5; // Bumping the Y down a bit. + + let y_inverted_location = vec2(i32(uv.x), i32(resolution.y) - i32(uv.y)); + let location = vec2(i32(uv.x), i32(uv.y)); + + var fragColor: vec4; + var fragCoord = vec2(f32(location.x), f32(location.y) ); + + var sampleY: f32 = 0.; + sampleY = sampleY + (globals.time * SPEED); + var finalColor: vec3 = vec3(0.); + let deltaY: f32 = 0.003; + + for (var i: f32 = -10.; i <= 10.; i = i + (1.)) { + let p: vec2 = uv + vec2(0.06 * i, 0.05 * i); + sampleY = sampleY + (i * deltaY); + if (p.x < -EFFECTWIDTH || p.x > EFFECTWIDTH) { + continue; + } + let line: f32 = perline(p, sampleY, LINETHICKNESS, NOISESCALE); + let opacity: f32 = exp(-abs(i * 0.2)); + let col: vec3 = palette(i * 0.04 + 0.3) * 2. * line * opacity; + finalColor = max(finalColor, col); + } + + return vec4f(finalColor, 1.0); +} + + +fn fade(t: vec2) -> vec2 { + return t * t * t * (t * (t * 6. - 15.) + 10.); +} + +fn permute(x: vec4) -> vec4 { + return (((x * 34. + 1.) * x) % (289.)); +} + +fn cnoise(P: vec2) -> f32 { + var Pi: vec4 = floor(P.xyxy) + vec4(0., 0., 1., 1.); + let Pf: vec4 = fract(P.xyxy) - vec4(0., 0., 1., 1.); + Pi = ((Pi) % (289.)); + let ix: vec4 = Pi.xzxz; + let iy: vec4 = Pi.yyww; + let fx: vec4 = Pf.xzxz; + let fy: vec4 = Pf.yyww; + var i: vec4 = permute(permute(ix) + iy); + var gx: vec4 = 2. 
* fract(i * 0.024390243) - 1.; + let gy: vec4 = abs(gx) - 0.5; + let tx: vec4 = floor(gx + 0.5); + gx = gx - tx; + var g00: vec2 = vec2(gx.x, gy.x); + var g10: vec2 = vec2(gx.y, gy.y); + var g01: vec2 = vec2(gx.z, gy.z); + var g11: vec2 = vec2(gx.w, gy.w); + let norm: vec4 = 1.7928429 - 0.85373473 * vec4(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11)); + g00 = g00 * (norm.x); + g01 = g01 * (norm.y); + g10 = g10 * (norm.z); + g11 = g11 * (norm.w); + let n00: f32 = dot(g00, vec2(fx.x, fy.x)); + let n10: f32 = dot(g10, vec2(fx.y, fy.y)); + let n01: f32 = dot(g01, vec2(fx.z, fy.z)); + let n11: f32 = dot(g11, vec2(fx.w, fy.w)); + let fade_xy: vec2 = fade(Pf.xy); + let n_x: vec2 = mix(vec2(n00, n01), vec2(n10, n11), fade_xy.x); + let n_xy: f32 = mix(n_x.x, n_x.y, fade_xy.y); + return 2.3 * n_xy; +} + +fn perline(p: vec2, noiseY: f32, lineThickness: f32, noiseScale: f32) -> f32 { + let x: f32 = p.x / 2.; + let s: f32 = cnoise(vec2(x, noiseY) * TEMPERATURE) * noiseScale; + let distanceToLine: f32 = abs(p.y - s); + return 0.009 / distanceToLine; +} + +/// Regular shadplayers will recognise this one... +fn palette(t: f32) -> vec3 { + let a: vec3 = vec3(0.5, 0.5, 0.5); + let b: vec3 = vec3(0.5, 0.5, 0.5); + let c: vec3 = vec3(1., 1., 1.); + let d: vec3 = vec3(0.263, 0.416, 0.557); + return a + b * cos(6.28318 * (c * t + d)); +} - return out; -} ``` -### assets/shaders/custom_gltf_2d +### screenshots/16-11-23/21-25-04/screenshot ```rust -#import bevy_sprite::{ - mesh2d_view_bindings::globals, - mesh2d_functions::{get_model_matrix, mesh2d_position_local_to_clip}, -} +/// ***************************** /// +/// This is a port of 'Perlin Waves' by zilian: https://www.shadertoy.com/view/DlVcRW /// +/// ***************************** /// -struct Vertex { - @builtin(instance_index) instance_index: u32, - @location(0) position: vec3, - @location(1) color: vec4, - @location(2) barycentric: vec3, -}; +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -struct VertexOutput { - @builtin(position) clip_position: vec4, - @location(0) color: vec4, - @location(1) barycentric: vec3, -}; +@group(0) @binding(0) var view: View; -@vertex -fn vertex(vertex: Vertex) -> VertexOutput { - var out: VertexOutput; - let model = get_model_matrix(vertex.instance_index); - out.clip_position = mesh2d_position_local_to_clip(model, vec4(vertex.position, 1.0)); - out.color = vertex.color; - out.barycentric = vertex.barycentric; - return out; -} -struct FragmentInput { - @location(0) color: vec4, - @location(1) barycentric: vec3, -}; +const TEMPERATURE: f32 = 5.; +const NOISESCALE: f32 = 0.2; +const EFFECTWIDTH: f32 = 1.; +const LINETHICKNESS: f32 = 0.008; +const SPEED: f32 = 0.2; + +/// This is a port of 'Perlin Waves' by zilian: https://www.shadertoy.com/view/DlVcRW /// @fragment -fn fragment(input: FragmentInput) -> @location(0) vec4 { - let d = min(input.barycentric.x, min(input.barycentric.y, input.barycentric.z)); - let t = 0.05 * (0.85 + sin(5.0 * globals.time)); - return mix(vec4(1.0,1.0,1.0,1.0), input.color, smoothstep(t, t+0.01, d)); -} +fn fragment(in: VertexOutput) -> @location(0) vec4 { + let resolution = view.viewport.zw; + var uv = in.uv; + uv.y *= 0.5; // Bumping the Y down a bit. 
+ + let y_inverted_location = vec2(i32(uv.x), i32(resolution.y) - i32(uv.y)); + let location = vec2(i32(uv.x), i32(uv.y)); + + var fragColor: vec4; + var fragCoord = vec2(f32(location.x), f32(location.y) ); + + var sampleY: f32 = 0.; + sampleY = sampleY + (globals.time * SPEED); + var finalColor: vec3 = vec3(0.); + let deltaY: f32 = 0.003; + + for (var i: f32 = -10.; i <= 10.; i = i + (1.)) { + let p: vec2 = uv + vec2(0.06 * i, 0.05 * i); + sampleY = sampleY + (i * deltaY); + if (p.x < -EFFECTWIDTH || p.x > EFFECTWIDTH) { + continue; + } + let line: f32 = perline(p, sampleY, LINETHICKNESS, NOISESCALE); + let opacity: f32 = exp(-abs(i * 0.2)); + let col: vec3 = palette(i * 0.04 + 0.3) * 2. * line * opacity; + finalColor = max(finalColor, col); + } + + return vec4f(finalColor, 1.0); +} + + +fn fade(t: vec2) -> vec2 { + return t * t * t * (t * (t * 6. - 15.) + 10.); +} + +fn permute(x: vec4) -> vec4 { + return (((x * 34. + 1.) * x) % (289.)); +} + +fn cnoise(P: vec2) -> f32 { + var Pi: vec4 = floor(P.xyxy) + vec4(0., 0., 1., 1.); + let Pf: vec4 = fract(P.xyxy) - vec4(0., 0., 1., 1.); + Pi = ((Pi) % (289.)); + let ix: vec4 = Pi.xzxz; + let iy: vec4 = Pi.yyww; + let fx: vec4 = Pf.xzxz; + let fy: vec4 = Pf.yyww; + var i: vec4 = permute(permute(ix) + iy); + var gx: vec4 = 2. * fract(i * 0.024390243) - 1.; + let gy: vec4 = abs(gx) - 0.5; + let tx: vec4 = floor(gx + 0.5); + gx = gx - tx; + var g00: vec2 = vec2(gx.x, gy.x); + var g10: vec2 = vec2(gx.y, gy.y); + var g01: vec2 = vec2(gx.z, gy.z); + var g11: vec2 = vec2(gx.w, gy.w); + let norm: vec4 = 1.7928429 - 0.85373473 * vec4(dot(g00, g00), dot(g01, g01), dot(g10, g10), dot(g11, g11)); + g00 = g00 * (norm.x); + g01 = g01 * (norm.y); + g10 = g10 * (norm.z); + g11 = g11 * (norm.w); + let n00: f32 = dot(g00, vec2(fx.x, fy.x)); + let n10: f32 = dot(g10, vec2(fx.y, fy.y)); + let n01: f32 = dot(g01, vec2(fx.z, fy.z)); + let n11: f32 = dot(g11, vec2(fx.w, fy.w)); + let fade_xy: vec2 = fade(Pf.xy); + let n_x: vec2 = mix(vec2(n00, n01), vec2(n10, n11), fade_xy.x); + let n_xy: f32 = mix(n_x.x, n_x.y, fade_xy.y); + return 2.3 * n_xy; +} + +fn perline(p: vec2, noiseY: f32, lineThickness: f32, noiseScale: f32) -> f32 { + let x: f32 = p.x / 2.; + let s: f32 = cnoise(vec2(x, noiseY) * TEMPERATURE) * noiseScale; + let distanceToLine: f32 = abs(p.y - s); + return 0.009 / distanceToLine; +} + +/// Regular shadplayers will recognise this one... 
+fn palette(t: f32) -> vec3 { + let a: vec3 = vec3(0.5, 0.5, 0.5); + let b: vec3 = vec3(0.5, 0.5, 0.5); + let c: vec3 = vec3(1., 1., 1.); + let d: vec3 = vec3(0.263, 0.416, 0.557); + return a + b * cos(6.28318 * (c * t + d)); +} + ``` -### assets/shaders/custom_vertex_attribute +### screenshots/09-06-24/20-43-57/screenshot ```rust -#import bevy_pbr::mesh_functions::{get_model_matrix, mesh_position_local_to_clip} +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -struct CustomMaterial { - color: vec4, -}; -@group(2) @binding(0) var material: CustomMaterial; - -struct Vertex { - @builtin(instance_index) instance_index: u32, - @location(0) position: vec3, - @location(1) blend_color: vec4, -}; +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -struct VertexOutput { - @builtin(position) clip_position: vec4, - @location(0) blend_color: vec4, -}; +@group(0) @binding(0) var view: View; -@vertex -fn vertex(vertex: Vertex) -> VertexOutput { - var out: VertexOutput; - out.clip_position = mesh_position_local_to_clip( - get_model_matrix(vertex.instance_index), - vec4(vertex.position, 1.0), - ); - out.blend_color = vertex.blend_color; - return out; -} +const SPEED:f32 = 1.0; -struct FragmentInput { - @location(0) blend_color: vec4, -}; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); -@fragment -fn fragment(input: FragmentInput) -> @location(0) vec4 { - return material.color * input.blend_color; -} + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### assets/shaders/texture_binding_array +### screenshots/09-10-23/07-22-16/screeenshot ```rust -#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_pbr::utils PI +#import shadplay::common TAU -@group(2) @binding(0) var textures: binding_array>; -@group(2) @binding(1) var nearest_sampler: sampler; -// We can also have array of samplers -// var samplers: binding_array; +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const HEIGHT:f32 = 4.128; +const SPEED:f32 = 1.80; +const CAM_DISTANCE: f32 = -2.; +const SIZE: f32 = 1.2; @fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { - // Select the texture to sample from using non-uniform uv coordinates - let coords = clamp(vec2(mesh.uv * 4.0), vec2(0u), vec2(3u)); - let index = coords.y * 4u + coords.x; - let inner_uv = fract(mesh.uv * 4.0); - return textureSample(textures[index], nearest_sampler, inner_uv); +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(PI / -2.0); + + // Create some colour, do nothing with it. 
+ var col = vec4f(0.0); + var base_colour = shader_toy_default(t, uv); + + // sdf for a 2D circle + uv /= SIZE; // Make uvs bigger + let d = -1.0 * sd_circle(uv, 0.3); // -1 to flip it so we're drawing the circle in colour, not the space around it. + + base_colour *= smoothstep(0.02, 0.09, d); // use the smoothstep to colour BY the circle's sdf. + col = vec4f(base_colour, d); // use the circle's sdf to, in the same way it supplies values to the smoothstep above, also be the alpha values -- so our 'background' is transparent. + + return col; } -``` +fn sd_circle(pt: vec2f, radius: f32)->f32{ + return length(pt) - radius; +} -### assets/shaders/circle_shader +/// This is the default (and rather pretty) shader you start with in ShaderToy +fn shader_toy_default(t: f32, uv: vec2f)-> vec3f{ + var col = vec3f(0.0); + let v = vec3(t) + vec3(uv.xyx) + vec3(0., 2., 4.); + return 0.5 + 0.5 * cos(v); +} -```rust -// This shader draws a circle with a given input color -#import bevy_ui::ui_vertex_output::UiVertexOutput +fn dist_line(ray_origin: vec3f, ray_dir: vec3f, pt: vec3f) -> f32 { + return length(cross(pt - ray_origin, ray_dir)) / length(ray_dir); +} -struct CustomUiMaterial { - @location(0) color: vec4 +fn sd_capsule(p: vec3f, a: vec3f, b: vec3f, r: f32) -> f32 { + let pa = p - a; + let ba = b - a; + let h = clamp(dot(pa, ba) / dot(ba, ba), 0., 1.); + return length(pa - ba * h) - r; +} +fn sd_sphere(pt: vec3f, radius: f32) -> f32 { + return length(pt) - radius; } -@group(1) @binding(0) -var input: CustomUiMaterial; -@fragment -fn fragment(in: UiVertexOutput) -> @location(0) vec4 { - // the UVs are now adjusted around the middle of the rect. - let uv = in.uv * 2.0 - 1.0; +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); +} - // circle alpha, the higher the power the harsher the falloff. - let alpha = 1.0 - pow(sqrt(dot(uv, uv)), 100.0); +fn sd_capped_cylinder(p: vec3f, h: vec2f) -> f32 { + let d: vec2f = abs(vec2f(length(p.xz), p.y)) - h; + return min(max(d.x, d.y), 0.0) + length(max(d, vec2f(0.0))); +} - return vec4(input.color.rgb, alpha); +fn sd_torus(p: vec3f, t: vec2f) -> f32 { + let q: vec2f = vec2f(length(p.xz) - t.x, p.y); + return length(q) - t.y; } + ``` -### assets/shaders/tonemapping_test_patterns +### screenshots/07-10-23/11-16-48/screeenshot ```rust -#import bevy_pbr::{ - mesh_view_bindings, - forward_io::VertexOutput, - utils::PI, -} +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_pbr::utils PI -#ifdef TONEMAP_IN_SHADER -#import bevy_core_pipeline::tonemapping::tone_mapping -#endif +#import bevy_render::view View -// Sweep across hues on y axis with value from 0.0 to +15EV across x axis -// quantized into 24 steps for both axis. 
-fn color_sweep(uv_input: vec2) -> vec3 { - var uv = uv_input; - let steps = 24.0; - uv.y = uv.y * (1.0 + 1.0 / steps); - let ratio = 2.0; - - let h = PI * 2.0 * floor(1.0 + steps * uv.y) / steps; - let L = floor(uv.x * steps * ratio) / (steps * ratio) - 0.5; +const HEIGHT:f32 = 4.128; +const INTENSITY:f32 = 5.0; +const NUM_LINES:f32 = 4.0; +const SPEED:f32 = 1.0; + +@fragment +// fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { +fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv.xy * 2.0) - 1.0; + uv *= rotate2D(PI / -2.0); + + var col = vec4f(0.0); + + let pt = uv.xy; + let radius = 0.725; + + col.r += -1.0 * sd_circle(pt, radius); - var color = vec3(0.0); - if uv.y < 1.0 { - color = cos(h + vec3(0.0, 1.0, 2.0) * PI * 2.0 / 3.0); - let maxRGB = max(color.r, max(color.g, color.b)); - let minRGB = min(color.r, min(color.g, color.b)); - color = exp(15.0 * L) * (color - minRGB) / (maxRGB - minRGB); - } else { - color = vec3(exp(15.0 * L)); - } - return color; -} -fn hsv_to_srgb(c: vec3) -> vec3 { - let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); - let p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); - return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); + + + col.a = 1.0; + return col; } -// Generates a continuous sRGB sweep. -fn continuous_hue(uv: vec2) -> vec3 { - return hsv_to_srgb(vec3(uv.x, 1.0, 1.0)) * max(0.0, exp2(uv.y * 9.0) - 1.0); +fn sd_circle(pt: vec2f, radius: f32) -> f32 { + return length(pt) - radius; } -@fragment -fn fragment( - in: VertexOutput, -) -> @location(0) vec4 { - var uv = in.uv; - var out = vec3(0.0); - if uv.y > 0.5 { - uv.y = 1.0 - uv.y; - out = color_sweep(vec2(uv.x, uv.y * 2.0)); - } else { - out = continuous_hue(vec2(uv.y * 2.0, uv.x)); - } - var color = vec4(out, 1.0); -#ifdef TONEMAP_IN_SHADER - color = tone_mapping(color, mesh_view_bindings::view.color_grading); -#endif - return color; + +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); } ``` -### assets/shaders/custom_material_screenspace_texture +### screenshots/07-10-23/aspect_ratio_correction_showed_with_circle/screeenshot ```rust -#import bevy_pbr::{ - mesh_view_bindings::view, - forward_io::VertexOutput, - utils::coords_to_viewport_uv, -} +/// A circle, normalised aspect ratio. 
-@group(2) @binding(0) var texture: texture_2d; -@group(2) @binding(1) var texture_sampler: sampler; +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_pbr::utils PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const HEIGHT:f32 = 4.128; +const INTENSITY:f32 = 5.0; +const NUM_LINES:f32 = 4.0; +const SPEED:f32 = 1.0; @fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { - let viewport_uv = coords_to_viewport_uv(mesh.position.xy, view.viewport); - let color = textureSample(texture, texture_sampler, viewport_uv); - return color; -} +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv.xy * 2.0) - 1.0; + let resolution = view.viewport.zw; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(PI / -2.0); -``` + var col = vec4f(0.0); -### assets/shaders/custom_material_2d + let pt = uv.xy; + let radius = 0.725; -```rust -#import bevy_sprite::mesh2d_vertex_output::VertexOutput -// we can import items from shader modules in the assets folder with a quoted path -#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER + let circle = -1.0 * sd_circle(pt, radius); + col.a += circle; + col.b += circle; -@group(2) @binding(0) var material_color: vec4; -@group(2) @binding(1) var base_color_texture: texture_2d; -@group(2) @binding(2) var base_color_sampler: sampler; -@fragment -fn fragment(mesh: VertexOutput) -> @location(0) vec4 { - return material_color * textureSample(base_color_texture, base_color_sampler, mesh.uv) * COLOR_MULTIPLIER; + return col; +} + +fn sd_circle(pt: vec2f, radius: f32) -> f32 { + return length(pt) - radius; +} + + +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); } ``` -### assets/shaders/cubemap_unlit +### screenshots/07-10-23/12-03-36/screeenshot ```rust -#import bevy_pbr::forward_io::VertexOutput +/// Spin a sphere.. +/// Trying to understand rotating something I draw in its 3d space, rotate2D is very useful, I want to rotate3D now. 
-#ifdef CUBEMAP_ARRAY -@group(2) @binding(0) var base_color_texture: texture_cube_array; -#else -@group(2) @binding(0) var base_color_texture: texture_cube; -#endif +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_pbr::utils PI -@group(2) @binding(1) var base_color_sampler: sampler; +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const HEIGHT:f32 = 4.128; +const INTENSITY:f32 = 5.0; +const NUM_LINES:f32 = 4.0; +const SPEED:f32 = 1.0; + +const CAM_DISTANCE: f32 = -2.; @fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { - let fragment_position_view_lh = mesh.world_position.xyz * vec3(1.0, 1.0, -1.0); - return textureSample( - base_color_texture, - base_color_sampler, - fragment_position_view_lh - ); -} +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders -``` + var uv = in.uv; + uv *= rotate2D(PI / -2.0); + uv = (uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time; + uv.x *= resolution.x / resolution.y; -### assets/shaders/fallback_image_test + var col = vec4f(0.0); -```rust -#import bevy_pbr::forward_io::VertexOutput + let pt = vec3f(vec2f(uv), 0.0); + let radius = 0.725; -@group(2) @binding(0) var test_texture_1d: texture_1d; -@group(2) @binding(1) var test_texture_1d_sampler: sampler; + let ray_origin = vec3f(0., 0., CAM_DISTANCE); // The camera is at -2.0 + let ray_dir = vec3f(uv.y, uv.x, 0.0) - ray_origin; //NOTE: due to our Rotate2D above we need to flip here. -@group(2) @binding(2) var test_texture_2d: texture_2d; -@group(2) @binding(3) var test_texture_2d_sampler: sampler; + let pt_dist = vec3(cos(t), 0.0, 3.0 + sin(t)); + var dist = dist_line(ray_origin, ray_dir, pt_dist); -@group(2) @binding(4) var test_texture_2d_array: texture_2d_array; -@group(2) @binding(5) var test_texture_2d_array_sampler: sampler; + dist = smoothstep(0.1, 0.09, dist); -@group(2) @binding(6) var test_texture_cube: texture_cube; -@group(2) @binding(7) var test_texture_cube_sampler: sampler; -@group(2) @binding(8) var test_texture_cube_array: texture_cube_array; -@group(2) @binding(9) var test_texture_cube_array_sampler: sampler; -@group(2) @binding(10) var test_texture_3d: texture_3d; -@group(2) @binding(11) var test_texture_3d_sampler: sampler; -@fragment -fn fragment(in: VertexOutput) {} -``` + col = vec4(dist); + return col; +} -### assets/shaders/custom_material +fn dist_line(ray_origin: vec3f, ray_dir: vec3f, pt:vec3f) -> f32{ + return length(cross(pt - ray_origin, ray_dir))/length(ray_dir); +} -```rust -#import bevy_pbr::forward_io::VertexOutput -// we can import items from shader modules in the assets folder with a quoted path -#import "shaders/custom_material_import.wgsl"::COLOR_MULTIPLIER +fn sd_capsule(p: vec3f, a: vec3f, b: vec3f, r: f32) -> f32 { + let pa = p - a; + let ba = b - a; + let h = clamp(dot(pa, ba) / dot(ba, ba), 0., 1.); + return length(pa - ba * h) - r; +} +fn sd_sphere(pt: vec3f, radius: f32) -> f32 { + return length(pt) - radius; +} -@group(2) @binding(0) var material_color: vec4; -@group(2) @binding(1) var material_color_texture: texture_2d; -@group(2) @binding(2) var material_color_sampler: sampler; -@fragment -fn fragment( - mesh: VertexOutput, -) -> @location(0) vec4 { - return material_color * textureSample(material_color_texture, material_color_sampler, mesh.uv) * COLOR_MULTIPLIER; +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + 
let s = sin(theta); + return mat2x2(c, s, -s, c); } ``` -### assets/shaders/array_texture +### screenshots/07-10-23/21-43-49/screeenshot ```rust -#import bevy_pbr::{ - forward_io::VertexOutput, - mesh_view_bindings::view, - pbr_types::{STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT, PbrInput, pbr_input_new}, - pbr_functions as fns, -} -#import bevy_core_pipeline::tonemapping::tone_mapping +/// Spin a sphere.. +/// Trying to understand rotating something I draw in its 3d space, rotate2D is very useful, I want to rotate3D now. -@group(2) @binding(0) var my_array_texture: texture_2d_array; -@group(2) @binding(1) var my_array_texture_sampler: sampler; +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import bevy_pbr::utils PI + +#import bevy_render::view View +@group(0) @binding(0) var view: View; + +const HEIGHT:f32 = 4.128; +const INTENSITY:f32 = 5.0; +const NUM_LINES:f32 = 4.0; +const SPEED:f32 = 0.20; +const CAM_DISTANCE: f32 = -2.; @fragment -fn fragment( - @builtin(front_facing) is_front: bool, - mesh: VertexOutput, -) -> @location(0) vec4 { - let layer = i32(mesh.world_position.x) & 0x3; +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(PI / -2.0); - // Prepare a 'processed' StandardMaterial by sampling all textures to resolve - // the material members - var pbr_input: PbrInput = pbr_input_new(); + var col = vec3f(0.0); + col = shader_toy_default(t, uv); - pbr_input.material.base_color = textureSample(my_array_texture, my_array_texture_sampler, mesh.uv, layer); -#ifdef VERTEX_COLORS - pbr_input.material.base_color = pbr_input.material.base_color * mesh.color; -#endif + return vec4(col, 1.0); +} - let double_sided = (pbr_input.material.flags & STANDARD_MATERIAL_FLAGS_DOUBLE_SIDED_BIT) != 0u; +/// This is the default (and rather pretty) shader you start with in ShaderToy +fn shader_toy_default(t: f32, uv: vec2f)-> vec3f{ + var col = vec3f(0.0); + let v = vec3(t) + vec3(uv.xyx) + vec3(0., 2., 4.); + return 0.5 + 0.5 * cos(v); +} - pbr_input.frag_coord = mesh.position; - pbr_input.world_position = mesh.world_position; - pbr_input.world_normal = fns::prepare_world_normal( - mesh.world_normal, - double_sided, - is_front, - ); +fn dist_line(ray_origin: vec3f, ray_dir: vec3f, pt: vec3f) -> f32 { + return length(cross(pt - ray_origin, ray_dir)) / length(ray_dir); +} - pbr_input.is_orthographic = view.projection[3].w == 1.0; +fn sd_capsule(p: vec3f, a: vec3f, b: vec3f, r: f32) -> f32 { + let pa = p - a; + let ba = b - a; + let h = clamp(dot(pa, ba) / dot(ba, ba), 0., 1.); + return length(pa - ba * h) - r; +} +fn sd_sphere(pt: vec3f, radius: f32) -> f32 { + return length(pt) - radius; +} - pbr_input.N = fns::apply_normal_mapping( - pbr_input.material.flags, - mesh.world_normal, - double_sided, - is_front, -#ifdef VERTEX_TANGENTS -#ifdef STANDARD_MATERIAL_NORMAL_MAP - mesh.world_tangent, -#endif -#endif - mesh.uv, - view.mip_bias, - ); - pbr_input.V = fns::calculate_view(mesh.world_position, pbr_input.is_orthographic); - return tone_mapping(fns::apply_pbr_lighting(pbr_input), view.color_grading); +/// Clockwise by `theta` +fn rotate2D(theta: f32) -> mat2x2 { + let c = cos(theta); + let s = sin(theta); + return mat2x2(c, s, -s, c); +} + +fn sd_capped_cylinder(p: vec3f, h: vec2f) -> f32 { + let d: vec2f = 
abs(vec2f(length(p.xz), p.y)) - h; + return min(max(d.x, d.y), 0.0) + length(max(d, vec2f(0.0))); +} + +fn sd_torus(p: vec3f, t: vec2f) -> f32 { + let q: vec2f = vec2f(length(p.xz) - t.x, p.y); + return length(q) - t.y; } + ``` -### assets/shaders/show_prepass +### screenshots/24-10-23/21-14-59/screenshot ```rust -#import bevy_pbr::{ - mesh_view_bindings::globals, - prepass_utils, - forward_io::VertexOutput, -} +/// ***************************** /// +/// This is a shadertoy port of 'Tileable Water Caustic' by Dave_Hoskins, who claims to of sound it on glsl sandbox, by 'joltz0r' +/// I have been unable to find the original. +/// ***************************** /// -struct ShowPrepassSettings { - show_depth: u32, - show_normals: u32, - show_motion_vectors: u32, - padding_1: u32, - padding_2: u32, -} -@group(2) @binding(0) var settings: ShowPrepassSettings; +#import bevy_pbr::mesh_vertex_output MeshVertexOutput +#import bevy_sprite::mesh2d_view_bindings globals +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D, TAU + +#import bevy_render::view View +@group(0) @binding(0) var view: View; +const MAX_ITER: i32 = 3; +const SPEED:f32 = 1.0; + @fragment -fn fragment( -#ifdef MULTISAMPLED - @builtin(sample_index) sample_index: u32, -#endif - mesh: VertexOutput, -) -> @location(0) vec4 { -#ifndef MULTISAMPLED - let sample_index = 0u; -#endif - if settings.show_depth == 1u { - let depth = bevy_pbr::prepass_utils::prepass_depth(mesh.position, sample_index); - return vec4(depth, depth, depth, 1.0); - } else if settings.show_normals == 1u { - let normal = bevy_pbr::prepass_utils::prepass_normal(mesh.position, sample_index); - return vec4(normal, 1.0); - } else if settings.show_motion_vectors == 1u { - let motion_vector = bevy_pbr::prepass_utils::prepass_motion_vector(mesh.position, sample_index); - return vec4(motion_vector / globals.delta_time, 0.0, 1.0); +fn fragment(in: MeshVertexOutput) -> @location(0) vec4 { + let time: f32 = globals.time * 0.5 + 23.0; + var uv: vec2 = in.uv; + + // Tiling calculation + var p: vec2; + // Note: Choose one of the following two lines based on whether SHOW_TILING is defined or not + // p = uv * TAU * 2.0 % TAU - 250.0; // show TILING + p = uv * TAU % TAU - 250.0; // hide TILING + + var i: vec2 = vec2(p); + var c: f32 = 1.0; + let inten: f32 = 0.005; + + for (var n: i32 = 0; n < MAX_ITER; n = n + 1) { + let t: f32 = time * (1.0 - (3.5 / f32(n + 1))); + i = p + vec2(cos(t - i.x) + sin(t + i.y), sin(t - i.y) + cos(t + i.x)); + c += 1.0 / length(vec2(p.x / (sin(i.x + t) / inten), p.y / (cos(i.y + t) / inten))); } - return vec4(0.0); + c /= f32(MAX_ITER); + c = 1.17 - pow(c, 1.4); + var colour: vec3 = vec3(pow(abs(c), 8.0)); + colour = clamp(colour + vec3(0.0, 0.35, 0.5), vec3(0.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0)); + + + // Show grid: + // let pixel: vec2 = vec2(2.0) / view.viewport.zw; + // uv *= 2.0; + // let f: f32 = floor(globals.time * 0.5 % 2.0); + // let first: vec2 = step(pixel, uv) * f; + // uv = step(fract(uv), pixel); + // colour = mix(colour, vec3(1.0, 1.0, 0.0), (uv.x + uv.y) * first.x * first.y); + + return vec4(colour, 1.0); } ``` -### assets/shaders/irradiance_volume_voxel_visualization +### screenshots/24-01-24/06-51-53/screenshot ```rust -#import bevy_pbr::forward_io::VertexOutput -#import bevy_pbr::irradiance_volume -#import bevy_pbr::mesh_view_bindings +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// 
***************************** /// -struct VoxelVisualizationIrradianceVolumeInfo { - transform: mat4x4, - inverse_transform: mat4x4, - resolution: vec3, - // A scale factor that's applied to the diffuse and specular light from the - // light probe. This is in units of cd/m² (candela per square meter). - intensity: f32, -} +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -@group(2) @binding(100) -var irradiance_volume_info: VoxelVisualizationIrradianceVolumeInfo; +@group(0) @binding(0) var view: View; -@fragment -fn fragment(mesh: VertexOutput) -> @location(0) vec4 { - // Snap the world position we provide to `irradiance_volume_light()` to the - // middle of the nearest texel. - var unit_pos = (irradiance_volume_info.inverse_transform * - vec4(mesh.world_position.xyz, 1.0f)).xyz; - let resolution = vec3(irradiance_volume_info.resolution); - let stp = clamp((unit_pos + 0.5) * resolution, vec3(0.5f), resolution - vec3(0.5f)); - let stp_rounded = round(stp - 0.5f) + 0.5f; - let rounded_world_pos = (irradiance_volume_info.transform * vec4(stp_rounded, 1.0f)).xyz; +const SPEED:f32 = 1.0; - // `irradiance_volume_light()` multiplies by intensity, so cancel it out. - // If we take intensity into account, the cubes will be way too bright. - let rgb = irradiance_volume::irradiance_volume_light( - mesh.world_position.xyz, - mesh.world_normal) / irradiance_volume_info.intensity; +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); - return vec4(rgb, 1.0f); -} + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### assets/shaders/game_of_life +### screenshots/24-01-24/06-36-04/screenshot ```rust -// The shader reads the previous frame's state from the `input` texture, and writes the new state of -// each pixel to the `output` texture. The textures are flipped each step to progress the -// simulation. -// Two textures are needed for the game of life as each pixel of step N depends on the state of its -// neighbors at step N-1. 
+/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -@group(0) @binding(0) var input: texture_storage_2d; +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -@group(0) @binding(1) var output: texture_storage_2d; +@group(0) @binding(0) var view: View; -fn hash(value: u32) -> u32 { - var state = value; - state = state ^ 2747636419u; - state = state * 2654435769u; - state = state ^ state >> 16u; - state = state * 2654435769u; - state = state ^ state >> 16u; - state = state * 2654435769u; - return state; -} +const SPEED:f32 = 1.0; -fn randomFloat(value: u32) -> f32 { - return f32(hash(value)) / 4294967295.0; -} +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); -@compute @workgroup_size(8, 8, 1) -fn init(@builtin(global_invocation_id) invocation_id: vec3, @builtin(num_workgroups) num_workgroups: vec3) { - let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); + return vec4f(shader_toy_default(t, uv), 1.0); +} + - let randomNumber = randomFloat(invocation_id.y << 16u | invocation_id.x); - let alive = randomNumber > 0.9; - let color = vec4(f32(alive)); +``` - textureStore(output, location, color); -} +### screenshots/24-01-24/06-37-20/screenshot -fn is_alive(location: vec2, offset_x: i32, offset_y: i32) -> i32 { - let value: vec4 = textureLoad(input, location + vec2(offset_x, offset_y)); - return i32(value.x); -} +```rust +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -fn count_alive(location: vec2) -> i32 { - return is_alive(location, -1, -1) + - is_alive(location, -1, 0) + - is_alive(location, -1, 1) + - is_alive(location, 0, -1) + - is_alive(location, 0, 1) + - is_alive(location, 1, -1) + - is_alive(location, 1, 0) + - is_alive(location, 1, 1); -} +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; -@compute @workgroup_size(8, 8, 1) -fn update(@builtin(global_invocation_id) invocation_id: vec3) { - let location = vec2(i32(invocation_id.x), i32(invocation_id.y)); +@group(0) @binding(0) var view: View; - let n_alive = count_alive(location); +const SPEED:f32 = 1.0; - var alive: bool; - if (n_alive == 3) { - alive = true; - } else if (n_alive == 2) { - let currently_alive = is_alive(location, 0, 0); - alive = bool(currently_alive); - } else { - alive = false; - } - let color = vec4(f32(alive)); +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); - textureStore(output, location, color); -} + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### 
assets/shaders/animate_shader +### screenshots/24-01-24/06-25-45/screenshot ```rust -// The time since startup data is in the globals binding which is part of the mesh_view_bindings import -#import bevy_pbr::{ - mesh_view_bindings::globals, - forward_io::VertexOutput, -} +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// -fn oklab_to_linear_srgb(c: vec3) -> vec3 { - let L = c.x; - let a = c.y; - let b = c.z; +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; - let l_ = L + 0.3963377774 * a + 0.2158037573 * b; - let m_ = L - 0.1055613458 * a - 0.0638541728 * b; - let s_ = L - 0.0894841775 * a - 1.2914855480 * b; +@group(0) @binding(0) var view: View; - let l = l_ * l_ * l_; - let m = m_ * m_ * m_; - let s = s_ * s_ * s_; +const SPEED:f32 = 1.0; - return vec3( - 4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s, - -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s, - -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s, - ); -} +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + return vec4f(shader_toy_default(t, uv), 1.0); +} + + +``` + +### screenshots/24-01-24/06-36-05/screenshot + +```rust +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { - let speed = 2.0; - // The globals binding contains various global values like time - // which is the time since startup in seconds - let t_1 = sin(globals.time * speed) * 0.5 + 0.5; - let t_2 = cos(globals.time * speed); + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); - let distance_to_center = distance(in.uv, vec2(0.5)) * 1.4; + return vec4f(shader_toy_default(t, uv), 1.0); +} + - // blending is done in a perceptual color space: https://bottosson.github.io/posts/oklab/ - let red = vec3(0.627955, 0.224863, 0.125846); - let green = vec3(0.86644, -0.233887, 0.179498); - let blue = vec3(0.701674, 0.274566, -0.169156); - let white = vec3(1.0, 0.0, 0.0); - let mixed = mix(mix(red, blue, t_1), mix(green, white, t_2), distance_to_center); +``` - return vec4(oklab_to_linear_srgb(mixed), 1.0); -} +### screenshots/24-01-24/06-36-03/screenshot + +```rust +/// ***************************** /// +/// THIS IS THE DEFAULT 2D SHADER /// +/// You can always get back to this with `python3 scripts/reset-2d.py` /// +/// ***************************** /// + +#import bevy_sprite::mesh2d_view_bindings::globals +#import 
shadplay::shader_utils::common::{NEG_HALF_PI, shader_toy_default, rotate2D, TWO_PI} +#import bevy_render::view::View +#import bevy_pbr::forward_io::VertexOutput; + +@group(0) @binding(0) var view: View; + +const SPEED:f32 = 1.0; + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + var uv = (in.uv * 2.0) - 1.0; + let resolution = view.viewport.zw; + let t = globals.time * SPEED; + uv.x *= resolution.x / resolution.y; + uv *= rotate2D(NEG_HALF_PI); + + return vec4f(shader_toy_default(t, uv), 1.0); +} + ``` -### assets/shaders/post_processing +### screenshots/27-11-23/20-44-03/screenshot ```rust -// This shader computes the chromatic aberration effect +//! +//! The default 3d Shader. +//! +#import bevy_pbr::forward_io::VertexOutput +#import bevy_pbr::mesh_view_bindings::globals; +#import bevy_pbr::utils PI +#import shadplay::shader_utils::common NEG_HALF_PI, shader_toy_default, rotate2D -// Since post processing is a fullscreen effect, we use the fullscreen vertex shader provided by bevy. -// This will import a vertex shader that renders a single fullscreen triangle. -// -// A fullscreen triangle is a single triangle that covers the entire screen. -// The box in the top left in that diagram is the screen. The 4 x are the corner of the screen -// -// Y axis -// 1 | x-----x...... -// 0 | | s | . ´ -// -1 | x_____x´ -// -2 | : .´ -// -3 | :´ -// +--------------- X axis -// -1 0 1 2 3 -// -// As you can see, the triangle ends up bigger than the screen. -// -// You don't need to worry about this too much since bevy will compute the correct UVs for you. -#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput +#import bevy_render::view View +@group(0) @binding(0) var view: View; -@group(0) @binding(0) var screen_texture: texture_2d; -@group(0) @binding(1) var texture_sampler: sampler; -struct PostProcessSettings { - intensity: f32, -#ifdef SIXTEEN_BYTE_ALIGNMENT - // WebGL2 structs must be 16 byte aligned. 
- _webgl2_padding: vec3 -#endif -} -@group(0) @binding(2) var settings: PostProcessSettings; +@group(1) @binding(1) var texture: texture_2d; +@group(1) @binding(2) var texture_sampler: sampler; + +const SPEED:f32 = 1.0; @fragment -fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4 { - // Chromatic aberration strength - let offset_strength = settings.intensity; +fn fragment(in: VertexOutput) -> @location(0) vec4 { + // ensure our uv coords match shadertoy/the-lil-book-of-shaders + let texture_uvs = in.uv; - // Sample each color channel with an arbitrary shift - return vec4( - textureSample(screen_texture, texture_sampler, in.uv + vec2(offset_strength, -offset_strength)).r, - textureSample(screen_texture, texture_sampler, in.uv + vec2(-offset_strength, 0.0)).g, - textureSample(screen_texture, texture_sampler, in.uv + vec2(0.0, offset_strength)).b, - 1.0 - ); + let tex: vec4f = textureSample(texture, texture_sampler, texture_uvs); + return tex; } +fn circle(p: vec2, r: f32) -> f32 { + return smoothstep(0.1, 0., abs(length(p) - r)); +} + ``` diff --git a/bevy-shaders-cheatsheet.md b/bevy-shaders-cheatsheet.md index d9552a0..b9f25fb 100755 --- a/bevy-shaders-cheatsheet.md +++ b/bevy-shaders-cheatsheet.md @@ -102,6 +102,7 @@ fn hsv_to_srgb(c: vec3) -> vec3 { ``` > NOTE: if you're in 2d, the globals is in a diff spot: `#import bevy_sprite::mesh2d_view_bindings globals` +> NOTE: #import bevy_sprite::mesh2d_vertex_output::VertexOutput is also NOT the same as bevy_pbr::forward_io::VertexOutput, make sure you're using the right one! ______________________________________________________________________ diff --git a/examples/might/Cargo.toml b/examples/might/Cargo.toml index 6288adf..ae46e18 100755 --- a/examples/might/Cargo.toml +++ b/examples/might/Cargo.toml @@ -4,13 +4,12 @@ version = "0.1.0" edition = "2021" [dependencies] -bevy = { version = "0.13.0", features = [ +bevy = { version = "0.14.0", features = [ "file_watcher", "dynamic_linking", "jpeg", ] } -bevy_editor_pls = "0.8.0" -bevy_panorbit_camera = "0.15.0" +bevy_panorbit_camera = { git = "https://github.com/kristoff3r/bevy_panorbit_camera" } #"0.18.3?" [dev-dependencies.bevy] opt-level = 3 diff --git a/src/main.rs b/src/main.rs index af5130e..636bfab 100755 --- a/src/main.rs +++ b/src/main.rs @@ -3,7 +3,7 @@ /// use bevy::{input::keyboard::KeyboardInput, prelude::*, window::WindowResized}; -use shadplay::{plugin::ShadPlayPlugin, system::config::UserSession, utils::AppState}; +use shadplay::{plugin::ShadPlayPlugin, system::config::UserSession}; fn main() { // Get UserConfig for the Shadplay window dimensions, decorations toggle etc. @@ -16,7 +16,6 @@ fn main() { let mut app = App::new(); let shadplay = app - .init_state::() .insert_resource(user_config) .insert_resource(ClearColor(Color::NONE)) .add_plugins(( diff --git a/src/plugin.rs b/src/plugin.rs index 5f232f1..d148671 100755 --- a/src/plugin.rs +++ b/src/plugin.rs @@ -1,5 +1,6 @@ use bevy::{ - input::keyboard::KeyboardInput, prelude::*, sprite::Material2dPlugin, window::WindowResized, + input::keyboard::KeyboardInput, log::tracing_subscriber::util::SubscriberInitExt, prelude::*, + sprite::Material2dPlugin, window::WindowResized, }; use bevy_panorbit_camera::PanOrbitCameraPlugin; @@ -10,7 +11,8 @@ pub struct ShadPlayPlugin; impl Plugin for ShadPlayPlugin { fn build(&self, app: &mut bevy::prelude::App) { - app.add_plugins(ShadplayShaderLibrary) // Something of a library with common functions. 
+ app.insert_state(AppState::TwoD) + .add_plugins(ShadplayShaderLibrary) // Something of a library with common functions. .add_plugins(crate::system::ScreenshotPlugin) //NOTE: this is not Bevy's one! .add_plugins(ColourPickerPlugin) .add_plugins(MaterialPlugin::::default()) diff --git a/src/shader_utils/mod.rs b/src/shader_utils/mod.rs index 6ba200f..d1ff29c 100755 --- a/src/shader_utils/mod.rs +++ b/src/shader_utils/mod.rs @@ -21,11 +21,11 @@ pub struct DragNDropShader { #[derive(Asset, AsBindGroup, TypePath, Debug, Clone)] // #[uuid = "a3d71c04-d054-4946-80f8-ba6cfbc90cad"] pub struct YourShader { - #[uniform(0)] - pub color: Color, //RGBA + #[uniform(100)] + pub color: LinearRgba, //RGBA - #[texture(1, dimension = "2d")] - #[sampler(2)] + #[texture(101, dimension = "2d")] + #[sampler(102)] pub img: Handle, } // 3d impl @@ -42,11 +42,11 @@ impl Material for YourShader { #[derive(Asset, AsBindGroup, TypePath, Debug, Clone)] // #[uuid = "f528511f-dcf2-4b0b-9522-a9df3a1a795b"] pub struct YourShader2D { - #[uniform(0)] + #[uniform(100)] pub(crate) mouse_pos: MousePos, - #[texture(1, dimension = "2d")] - #[sampler(2)] + #[texture(101, dimension = "2d")] + #[sampler(102)] pub img: Handle, } @@ -70,14 +70,14 @@ impl Material2d for YourShader2D { #[derive(Asset, AsBindGroup, TypePath, Debug, Clone)] // #[uuid = "c74e039a-3df7-4f71-bd1d-7fe4b25a2230"] struct DottedLineShader { - #[uniform(0)] + #[uniform(100)] uniforms: Holder, //RGBA } /// Simplified holding struct to make passing across uniform(n) simpler. #[derive(ShaderType, Default, Clone, Debug)] struct Holder { - tint: Color, + tint: LinearRgba, /// How wide do you want the line as a % of its availablu uv space: 0.5 would be 50% of the surface of the geometry line_width: f32, /// How many segments (transparent 'cuts') do you want? 
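The `src/shader_utils/mod.rs` hunks above move the custom material's bind-group entries up into the 100+ range and switch the colour uniform to `LinearRgba` for Bevy 0.14. A minimal sketch of that pattern, assuming a hypothetical `ExampleMaterial` and shader path (not shadplay's actual types); on the WGSL side these line up with `@group(2) @binding(100)`-style declarations like the ones in the extended-material listing earlier in this diff:

```rust
use bevy::{
    color::LinearRgba,
    prelude::*,
    render::render_resource::{AsBindGroup, ShaderRef},
};

// Hypothetical material mirroring the binding layout YourShader migrates to:
// the uniform/texture/sampler sit at bindings 100/101/102 of the material
// bind group, and the tint is a LinearRgba rather than the old Color.
#[derive(Asset, TypePath, AsBindGroup, Debug, Clone)]
struct ExampleMaterial {
    #[uniform(100)]
    color: LinearRgba,
    #[texture(101, dimension = "2d")]
    #[sampler(102)]
    img: Handle<Image>,
}

impl Material for ExampleMaterial {
    fn fragment_shader() -> ShaderRef {
        // Illustrative path only.
        "shaders/example_material.wgsl".into()
    }
}
```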
diff --git a/src/ui/colour_picker_plugin.rs b/src/ui/colour_picker_plugin.rs index d91420b..8be2b37 100755 --- a/src/ui/colour_picker_plugin.rs +++ b/src/ui/colour_picker_plugin.rs @@ -26,7 +26,7 @@ pub struct ColourPickerPlugin; impl Plugin for ColourPickerPlugin { fn build(&self, app: &mut App) { - app.add_plugins(EguiPlugin); + // app.add_plugins(EguiPlugin); app.insert_resource(ColourPickerTool::default()) .insert_resource(Toggle::default()) @@ -36,9 +36,9 @@ impl Plugin for ColourPickerPlugin { Update, ( toggle_ui, - ColourPickerTool::draw_ui - .run_if(resource_exists::) - .run_if(resource_equals(Toggle { open: true })), + // ColourPickerTool::draw_ui + // .run_if(resource_exists::) + // .run_if(resource_equals(Toggle { open: true })), ), ); } diff --git a/src/ui/help_ui.rs b/src/ui/help_ui.rs index 359d531..fa4069d 100755 --- a/src/ui/help_ui.rs +++ b/src/ui/help_ui.rs @@ -23,7 +23,7 @@ impl Plugin for HelpUIPlugin { Update, ( toggle_help_ui, - help_window.run_if(resource_equals(HelpUIToggle { open: true })), + // help_window.run_if(resource_equals(HelpUIToggle { open: true })), ), ); } diff --git a/src/utils.rs b/src/utils.rs index f76c657..832ff57 100755 --- a/src/utils.rs +++ b/src/utils.rs @@ -10,6 +10,7 @@ use crate::prelude::*; /// Used by: cam_switch_system, screenshot #[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Hash, States)] pub enum AppState { + // Startup, #[default] TwoD, ThreeD, @@ -238,7 +239,7 @@ pub fn init_shapes( })), transform: Transform::from_xyz(0.0, 0.3, 0.0), material: materials.add(crate::shader_utils::YourShader { - color: Color::default(), + color: Color::default().into(), img: texture.clone(), }), ..default() @@ -254,7 +255,7 @@ pub fn init_shapes( mesh: meshes.add(Mesh::from(Cuboid::new(1.85, 1.85, 1.85))), transform: Transform::from_xyz(0.0, 0.3, 0.0), material: materials.add(crate::shader_utils::YourShader { - color: Color::default(), + color: Color::default().into(), img: texture.clone(), }), ..default() @@ -270,7 +271,7 @@ pub fn init_shapes( mesh: meshes.add(Sphere { radius: 1.40 }), transform: Transform::from_xyz(0.0, 0.3, 0.0), material: materials.add(crate::shader_utils::YourShader { - color: Color::default(), + color: Color::default().into(), img: texture.clone(), }), ..default()
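A small sketch of the call-site conversion the `src/utils.rs` hunks rely on: because `YourShader::color` is now a `LinearRgba`, a Bevy 0.14 `Color` has to be converted explicitly, which is what `Color::default().into()` does in `init_shapes`. The helper name here is made up for illustration:

```rust
use bevy::color::{Color, LinearRgba};

// Hypothetical helper: convert whatever Color the caller has into the
// LinearRgba that YourShader::color now expects (Bevy 0.14 splits the old
// Color type into per-colour-space types).
fn to_material_colour(c: Color) -> LinearRgba {
    // Equivalent to the `Color::default().into()` calls in init_shapes.
    LinearRgba::from(c)
}

fn default_tint() -> LinearRgba {
    // Color::default() is white, so this is plain linear white.
    to_material_colour(Color::default())
}
```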