diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 93cc5532..39ce75ea 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -2,16 +2,75 @@ name: Rust on: pull_request: - branches: [ "dev", "rewrite/v3" ] + branches: [ "master", "rewrite/v3" ] push: - branches: [ "dev", "rewrite/v3" ] + branches: [ "master", "rewrite/v3" ] workflow_dispatch: env: CARGO_TERM_COLOR: always jobs: + format: + name: Check code formatting + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Rust nightly + uses: dtolnay/rust-toolchain@nightly + + - name: Install rustfmt + run: rustup component add rustfmt + + - name: Run cargo fmt + run: cargo fmt --all -- --check + + clippy: + name: Run Clippy + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Restore cargo cache + uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Install Rust nightly + uses: dtolnay/rust-toolchain@nightly + + - name: Install clippy + run: rustup component add clippy + + - name: Run Clippy + run: cargo clippy --all-targets -- -Dwarnings + + security: + name: Check for security vulnerabilities + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Rust nightly + uses: dtolnay/rust-toolchain@nightly + + - name: Install audit + run: cargo install cargo-audit + + - name: Run cargo audit + run: cargo audit + build: + name: Build project runs-on: ${{ matrix.os }} strategy: matrix: @@ -28,33 +87,56 @@ jobs: uses: actions/checkout@v4 - name: Cache dependencies - uses: swatinem/rust-cache@v2 + uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + 
restore-keys: | + ${{ runner.os }}-cargo- - name: Install Rust nightly uses: dtolnay/rust-toolchain@nightly with: targets: ${{ matrix.target }} - - name: Run Clippy - uses: ClementTsang/cargo-action@v0.0.6 - with: - args: -- -Dwarnings - command: clippy - - name: Build - uses: ClementTsang/cargo-action@v0.0.6 - with: - args: --release --target ${{ matrix.target }} --verbose - command: build + run: cargo build --release --target ${{ matrix.target }} --verbose - - name: Run tests - uses: ClementTsang/cargo-action@v0.0.6 + test: + name: Run tests + runs-on: ${{ matrix.os }} + strategy: + matrix: + include: + - os: macos-14 + target: aarch64-apple-darwin + - os: ubuntu-latest + target: x86_64-unknown-linux-gnu + - os: windows-latest + target: x86_64-pc-windows-msvc + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Cache dependencies + uses: actions/cache@v3 with: - args: --target ${{ matrix.target }} --verbose - command: test + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- - - name: Upload executable - uses: actions/upload-artifact@v4 + - name: Install Rust nightly + uses: dtolnay/rust-toolchain@nightly with: - name: ferrumc-${{ matrix.os }} - path: target/${{ matrix.target }}/release/ferrumc* + targets: ${{ matrix.target }} + + - name: Run tests + run: cargo test --target ${{ matrix.target }} --verbose diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 9447d914..b71e335a 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -56,7 +56,7 @@ a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at [INSERT EMAIL ADDRESS]. All +reported by contacting the project team at stranger8722@gmail.com. 
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d73aeecd..1e1d711e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,46 +2,65 @@ When contributing to this repository, you'll have more luck with getting PRs approved if you come chat with us in the Discord server and letting us know about what you are fixing/adding. +Keep in mind that clippy, rustfmt and cargo-audit are enforced on CI, so make sure your code passes these checks. ## Pull Request Process 1. Make sure all tests and lints pass. PRs that don't pass CI will be rejected if your code is the cause of the failing -tests/lints. + tests/lints. 2. Make sure all needed files are also included and not using absolute paths. 3. Include a sufficient explanation of your PR. What is it adding/fixing, why does this feature need to be added/fixed, -who have you discussed this with, etc. If these questions were answered in a conversation on this Discord, mention who -you talked with and what consensus was reached. Unexplained PRs will rarely be accepted. + who have you discussed this with, etc. If these questions were answered in a conversation on this Discord, mention + who you talked with and what consensus was reached. Unexplained PRs will rarely be accepted. 4. Check again that tests pass. 5. Check a 3rd time. 6. Check that Clippy passes with no issues. `cargo clippy --all-targets -- -Dwarnings` is used on CI. -7. Submit PR. +7. Check that Rustfmt passes with no issues. `cargo fmt --all -- --check` is used on CI. +8. Check that Cargo-audit passes with no issues. `cargo audit` is used on CI. +9. Submit PR. 
## Project specific guidelines + Just some rules to try to keep the repo nice and organised + ### Branches -#### `rewrite/v3` + +#### `master` + This branch is the main branch. This is where all PRs should be made to. This branch is the most up to date and should only be merged into with completed features. + #### `feature/feature-name` + This branch is for developing a feature. Once the feature is complete, a PR should be -made to the dev branch. This branch should be branched off of the dev branch. +made to the master branch. This branch should be branched off of the master branch. + #### `fix/fixed-thing` -This branch is for fixing a bug. Once the bug is fixed, a PR should be made to the dev -branch. This branch should be branched off of the dev branch. + +This branch is for fixing a bug. Once the bug is fixed, a PR should be made to the master +branch. This branch should be branched off of the master branch. + #### `rework/refactored-thing` -This branch is for refactoring code. Once the code is refactored, a PR should be made to the dev branch. + +This branch is for refactoring code. Once the code is refactored, a PR should be made to the master branch. + #### `housekeeping` + This branch is for stuff relating to the repo itself. This could be updating the README, adding -new CI checks, etc. This branch should be branched off of the dev branch. +new CI checks, etc. This branch should be branched off of the master branch. + #### `docs` -This branch is for updating the documentation. This branch should be branched off of the dev branch. + +This branch is for updating the documentation. This branch should be branched off of the master branch. This is used for stuff that doesn't actually modify the code, but the documentation. 
### Project Layout + ```text +---.etc | Non-code files +---.github | GitHub specific files +---assets | Assets for the Readme ++---scripts | Scripts for the project, usually python or bash +---src | Source code | +---bin | The main binary that stitches everything together | +---lib | The libraries that provide the business logic @@ -57,45 +76,74 @@ This is used for stuff that doesn't actually modify the code, but the documentat | | \---world | Code for interacting with the world | \---tests | Unit tests ``` + If you add a new directory, please add it to the above list along with its purpose. ### Code rules + 1. Tests that only generate/dump data must be `#[ignore]`d. These tests are not useful for CI and should not be run. 2. No absolute paths. This will break the CI and make it harder to run the code on different machines. -3. Try to avoid just chaining `../` to get to the root of the project. This makes it harder to move files around and work -out where a referenced file is. There is a `root!()` macro that can be used to get the root of the project as a string. +3. Try to avoid just chaining `../` to get to the root of the project. This makes it harder to move files around and + work + out where a referenced file is. There is a `get_root_path()` function that can be used to get the root of the project + as a + PathBuf. 4. Don't be lazy and use `unwrap()`. If you are sure that a value will always be `Some`, use `expect()`. If you are not -sure, use `match` or `if let`. Please also have a more detailed `error!()` message if you are using `expect()`. -5. Avoid `.clone()`ing data. If you need to clone data, make sure that it is necessary and that the data is not too large. -Cloning is ok however in sections of code that only need to run once and small performance hits are acceptable (eg, loading -config files, starting up the database). + sure, use `match` or `if let`. Please also have a more detailed `error!()` message if you are using `expect()`. +5. 
Avoid `.clone()`ing data. If you need to clone data, make sure that it is necessary and that the data is not too + large. + Cloning is ok however in sections of code that only need to run once and small performance hits are acceptable (eg, + loading config files, starting up the database). 6. New dependencies should be added to the workspace `Cargo.toml` file. This will make it easier to manage dependencies -and will make sure that all dependencies are of the same version. + and will make sure that all dependencies are of the same version. 7. If you are adding a new feature that warrants major separation, add it as a new crate and then include it in the -workspace `Cargo.toml` file. This will make it easier to manage the code and will make sure that the code is well -separated. + workspace `Cargo.toml` file. This will make it easier to manage the code and will make sure that the code is well + separated. 8. If you are adding an extra sub-crate, you must create a new set of `thiserror` based error types for that crate. This -will make it easier to understand where an error is coming from and will make it easier to handle errors. + will make it easier to understand where an error is coming from and will make it easier to handle errors. 9. Use `cargo clippy` to check for any issues with the code. This will be checked in CI and will cause the build to fail -if there are any issues. There is no excuse for *your* code to fail the lints. -10. Use `#[expect(lint)]` instead of `#[allow(lint)]` if you are sure that the lint is not an issue. This will make it -easier to find and remove these lints in the future. -11. Use `#[cfg(test)]` to only include code in tests. This will make the code easier to read and understand. -12. Where applicable, add doc strings to functions and modules. This will make it easier for others to understand the code. 
-Check https://doc.rust-lang.org/nightly/rustdoc/how-to-write-documentation.html for more information on how to write good -documentation. -13. Unsafe code is ok as long as it is well documented and the reason for the unsafe code is explained. If you are not sure -if the code is safe, ask in the Discord. -14. Limit the use of raw instructions as much as possible. This will make the code easier to read and understand. There -are some cases where raw instructions are needed, but these should be kept to a minimum. -15. You will be asked to fix your PR if folders like `.vscode` or `.idea` are included in the PR. These folders are -specific to your IDE and should not be included in the PR. -16. If you are adding a new feature, make sure to add tests for it. This will make sure that the feature works as expected -and will help prevent regressions in the future. -17. If you are fixing a bug, make sure to add a test that reproduces the bug. This will make sure that the bug is fixed -and will help prevent regressions in the future. -18. If your code isn't sufficiently documented, you will be asked to add documentation. -19. If your code doesn't have tests where it should, you will be asked to add tests. + if there are any issues. There is no excuse for *your* code to fail the lints. +10. Use `cargo fmt` to format the code. This will be checked in CI and will cause the build to fail if the code is not + formatted correctly. There is no excuse for *your* code to fail the formatting. +11. Use `#[expect(lint)]` instead of `#[allow(lint)]` if you are sure that the lint is not an issue. This will make it + easier to find and remove these lints in the future. +12. Use `#[cfg(test)]` to only include code in tests. This will make the code easier to read and understand. +13. Where applicable, add doc strings to functions and modules. This will make it easier for others to understand the + code. 
+ Check https://doc.rust-lang.org/nightly/rustdoc/how-to-write-documentation.html for more information on how to write + good documentation. +14. Unsafe code is ok as long as it is well documented and the reason for the unsafe code is explained. If you are not + sure if the code is safe, ask in the Discord. +15. Limit the use of raw instructions as much as possible. This will make the code easier to read and understand. There + are some cases where raw instructions are needed, but these should be kept to a minimum. +16. You will be asked to fix your PR if folders like `.vscode` or `.idea` are included in the PR. These folders are + specific to your IDE and should not be included in the PR. +17. If you are adding a new feature, make sure to add tests for it. This will make sure that the feature works as + expected and will help prevent regressions in the future. +18. If you are fixing a bug, make sure to add a test that reproduces the bug. This will make sure that the bug is fixed + and will help prevent regressions in the future. +19. If your code isn't sufficiently documented, you will be asked to add documentation. +20. If your code doesn't have tests where it should, you will be asked to add tests. + +## Notes on formatting + +Some IDEs have an automatic formatter that will format the code when you save. It is recommended to use this feature to +keep the code formatted correctly. +
If you are using VSCode, you can use the `rust-analyzer` extension to format the +code +automatically. This [StackOverflow answer](https://stackoverflow.com/a/67861602/15894829) explains how to set this +up.
+If you are using a JetBrains IDE (Intellij, RustRover, CLion, etc.), you can use the `Rust` plugin to format the code +automatically (This plugin is not required for RustRover). +This [Docs page](https://www.jetbrains.com/help/idea/reformat-and-rearrange-code.html#reformat-on-save) +explains how to set this up. Clippy formatting on the fly is recommended as well, though this can cause a noticeable +performance hit. + +Automatic formatting is highly recommended as it will ensure that the code you write is correctly formatted as you go, +instead of running `cargo clippy` when you are done and having 400 clippy errors to fix at once. You should still run +the clippy and fmt commands before submitting a PR to make sure that the code is correctly formatted and passes the +lints, +but automatic formatting will help to catch most of these issues as you go. ## Code of Conduct diff --git a/Cargo.toml b/Cargo.toml index e130eb0f..66091ef7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,11 +13,10 @@ resolver = "2" members = [ "src/bin", "src/lib/adapters/anvil", - "src/lib/adapters/mca", "src/lib/adapters/nbt", "src/lib/core", "src/lib/core/state", "src/lib/derive_macros", "src/lib/ecs", "src/lib/events", "src/lib/net", @@ -25,15 +27,12 @@ members = [ "src/lib/net/crates/encryption", "src/lib/plugins", "src/lib/storage", + "src/lib/text", "src/lib/utils", "src/lib/utils/general_purpose", "src/lib/utils/logging", "src/lib/utils/profiling", "src/lib/world", - "src/lib/derive_macros", - "src/lib/adapters/nbt", "src/lib/adapters/mca", - "src/tests", "src/lib/adapters/anvil", - "src/lib/text", ] #================== Lints ==================# @@ -71,29 +70,38 @@ debug-assertions = false overflow-checks = false panic = "abort" +[profile.hyper] +inherits = "release" +lto = true +opt-level = 3 +debug = false +debug-assertions = false +overflow-checks = false +panic = "abort" +codegen-units = 1 + 
#=============== Dependencies ==============# [workspace.dependencies] # Workspace members +ferrumc-anvil = { path = "src/lib/adapters/anvil" } +ferrumc-config = { path = "src/lib/utils/config" } ferrumc-core = { path = "src/lib/core" } ferrumc-ecs = { path = "src/lib/ecs" } ferrumc-events = { path = "src/lib/events" } +ferrumc-general-purpose = { path = "src/lib/utils/general_purpose" } +ferrumc-logging = { path = "src/lib/utils/logging" } +ferrumc-macros = { path = "src/lib/derive_macros" } +ferrumc-nbt = { path = "src/lib/adapters/nbt" } ferrumc-net = { path = "src/lib/net" } -ferrumc-text = { path = "src/lib/text" } -ferrumc-net-encryption = { path = "src/lib/net/crates/encryption" } ferrumc-net-codec = { path = "src/lib/net/crates/codec" } +ferrumc-net-encryption = { path = "src/lib/net/crates/encryption" } ferrumc-plugins = { path = "src/lib/plugins" } +ferrumc-profiling = { path = "src/lib/utils/profiling" } +ferrumc-state = { path = "src/lib/core/state" } ferrumc-storage = { path = "src/lib/storage" } +ferrumc-text = { path = "src/lib/text" } ferrumc-utils = { path = "src/lib/utils" } -ferrumc-profiling = { path = "src/lib/utils/profiling" } -ferrumc-logging = { path = "src/lib/utils/logging" } -ferrumc-config = { path = "src/lib/utils/config" } -ferrumc-general-purpose = { path = "src/lib/utils/general_purpose" } -ferrumc-macros = { path = "src/lib/derive_macros" } ferrumc-world = { path = "src/lib/world" } -ferrumc-nbt = { path = "src/lib/adapters/nbt" } -ferrumc-anvil = { path = "src/lib/adapters/anvil" } -ferrumc-tests = { path = "src/tests" } -ferrumc-state = { path = "src/lib/core/state"} # Asynchronous @@ -110,15 +118,13 @@ log = "0.4.22" # Concurrency/Parallelism parking_lot = "0.12.3" rayon = "1.10.0" -crossbeam = "0.8.4" # Network # Error handling -thiserror = "1.0.63" +thiserror = "2.0.3" # Cryptography -rsa = "0.9.6" rand = "0.9.0-beta.0" fnv = "1.0.7" @@ -176,12 +182,4 @@ memmap2 = "0.9.5" # Benchmarking criterion = { version = "0.5.1", 
features = ["html_reports"] } -[profile.hyper] -inherits = "release" -lto = true -opt-level = 3 -debug = false -debug-assertions = false -overflow-checks = false -panic = "abort" -codegen-units = 1 + diff --git a/README.md b/README.md index 8abe991a..62446783 100644 --- a/README.md +++ b/README.md @@ -1,90 +1,243 @@ -# Ferrumc Rewrite [v3] - -> [!WARNING] -> FerrumC is going through a major rewrite **for 1.21.1**. This branch contains all of the new code.\ -> To view the original code, go to the [dev branch](https://github.com/ferrumc-rs/ferrumc/tree/dev). - -# Roadmap -To view the roadmap, see [plans.md](assets/plans/plans.md) - -> [!IMPORTANT] -> Use pull requests instead of direct pushes. - -## Contributing -**Want to contribute to FerrumC?**\ -Make sure to check out [CONTRIBUTING.md](CONTRIBUTING.md).\ -We would highly recommend you join our [Discord](https://discord.gg/FqT5J8EMjwk). - -## 📥 Installation/ 🖥️ Usage -### Use docker - -This method comes with a default world and might be easier assuming you arent doing development and dont have cargo already installed. Just run the following command -```bash -docker run -d -p 25565:25565 -v ferrumc/ferrumc-example:latest -``` - - -// TODO: Throw the images in dockerhub under the ferrumc username. - -### Build from Source . +FerrumC Header +
+ License + Code Size + Lines of Code + Language + + Discord + +
+ +
+ About + • + Features + • + Getting Started + • + Development + • + License + • + FAQ +
+ +## 📖 About + +FerrumC is a **1.21.1** Minecraft server implementation written from the ground up in Rust. Leveraging the power of the +Rust +programming language, it is completely multithreaded and offers high performance as well as amazing memory efficiency! + +[Docs](https://docs.ferrumc.com) are currently under construction, but you can join +our [Discord server](https://discord.gg/qT5J8EMjwk) for help or to discuss the project! + +In-game screenshot + +## ✨ Key Features + + + +

✅ Upcoming features

+ + + +## 🚀 Getting Started + +### Prerequisites + +- Rust compiler (latest nightly version) +- Cargo (comes with Rust) + +## 📥 Installation + +[//]: # (#### Option 1: Download pre-compiled binary (Maybe outdated!)) + +[//]: # () + +[//]: # (1. Go to the [Releases](https://github.com/ferrumc-rs/ferrumc/releases) page) + +[//]: # (2. Download the latest version for your operating system) + +[//]: # (3. Extract the archive to your desired location) + +Unfortunately, the server is **not yet ready for production use**. We are still in the early +stages of development and are working hard to add more features and fix bugs. +For now, you can either **compile** the server from source or **download** from Github Actions. + +### [Option 1] Download from Github Actions + +![Where To Find](https://github.com/ferrumc-rs/ferrumc/assets/README/download_prebuilt.gif?raw=true) + +1. Go to the [Actions](https://github.com/ferrumc-rs/ferrumc/actions) tab +2. Click on the latest build +3. Scroll all the way down to the `Artifacts` section +4. Download the artifact for your operating system (Windows, Linux, or macOS) +5. Follow the instructions in the `Usage` section + +### [Option 2] Compile from source + +##### Clone and build the project. ```bash # Clone the repository -git clone https://github.com/Sweattypalms/ferrumc +git clone https://github.com/ferrumc-rs/ferrumc cd ferrumc # Build the project cargo build --release ``` -##### The binary will be in target/release/ +#### The binary will be in target/release/ -Then +## 🖥️ Usage 1. Move the FerrumC binary (`ferrumc.exe` or `ferrumc` depending on the OS) to your desired server directory 2. Open a terminal in that directory 3. (Optional) Generate a config file: `./ferrumc --setup` - Edit the generated `config.toml` file to customize your server settings -4. Import an existing world: Place the region files (`.mca`) in the folder named `import` then run - `./ferrumc --import`. 
- - The location of these files is explained [here](https://minecraft.wiki/w/Region_file_format#Location). - - If you want to modify batch size (default 150), you can use `./ferrumc --import --batch_size=`. - - Basically the number of chunks to import at once, higher => faster but more CPU intensive. - - Max is 1024, since that's the max number of chunks in a region(`.mca`) file. +4. Import an existing world: Either copy your world files to the server directory or specify the path to the world files + in the `config.toml` file. This should be the root directory of your world files, containing the `region` directory + as well as other folders like DIM1, playerdata, etc. The default import path is `import` so you should end up with a + directory structure like this: + ``` + server_directory + ├── config.toml + ├── ferrumc.exe + ├── import + │ ├── region + │ │ ├── r.0.0.mca + │ │ ├── r.0.1.mca + │ │ ├── ... + │ ├── DIM1 + │ ├── playerdata + │ ├── ... + ``` + - The location of these files is explained [here](https://minecraft.wiki/w/Region_file_format#Location). 5. Run the server: - Windows: `.\ferrumc.exe` - Linux/macOS: `./ferrumc` - You can change logging level by using `--log=`: - - e.g. `.\ferrumc.exe --log=info` for info level logging - - Possible values: - - `trace` (Extremely verbose) - - `debug` (Default, Slightly verbose, used for debugging) - - `info` (**Recommended**, useful information) - - `warn` (Only warnings) - - `error` (Only errors) + - e.g. `.\ferrumc.exe --log=info` for info level logging + - Possible values: + - `trace` (Extremely verbose) + - `debug` (Default, Slightly verbose, used for debugging) + - `info` (**Recommended**, useful information) + - `warn` (Only warnings) + - `error` (Only errors) + +## 🛠️ Development + +We welcome contributions! If you'd like to contribute to FerrumC, please follow these steps: + +1. Fork the repository +2. Create a new branch for your feature +3. Implement your changes +4. Write or update tests as necessary +5. 
Submit a pull request + +*Please* join our [Discord server](https://discord.gg/qT5J8EMjwk) to get help or discuss the project! +Also have a look at our [CONTRIBUTING.md](CONTRIBUTING.md) file for more information. + +## ❔ FAQ -*Note: You can specify the directory to treat as the root directory (the place where the config files, data files, -etc. live) by setting an environment variable `FERRUMC_ROOT` to the path of the directory. For example, I run -`set FERRUMC_ROOT=C:\Users\ReCor\Documents\Code\Rust\ferrumc` before running the server. This is useful if you -can't move the place the binary is executed from (`cargo run` for example).* +### How does this project differ from: ---- +- **Valence**: Valence is a framework for building your own custom server by pulling in different components of their + library. FerrumC aims to be a full replacement for the vanilla server. It's like + the difference between buying the ingredients to make a meal yourself or just buying a pre-made meal. +- **Minestom**: Same as Valence, it's a framework to build your own server, which is different to what we are trying to + do. +- **Paper/Spigot/Bukkit**: These are all great tools and have undoubtedly set the groundwork for projects like this to + exist, but ultimately they are still somewhat bound to the original server implementation. We aim to write the entire + server from the ground up, hopefully giving us a leg up. +- **Pumpkin**: It really doesn't differ that much. We are both trying to achieve the same thing. It's also not a + competition, we are both aware of each other's progress and to be honest the Pumpkin team are doing really well. We + won't tolerate any disrespect towards them as they are also undertaking the same monumental task. -## Nightly Dev Server +### Will we be implementing terrain generation? -There is a nightly deployed version of rewrite/v3 running at `ferrumc.nimq.xyz`. +Yes! 
Not currently on our list of priorities and it's very unlikely that we will be able to have 1:1 terrain generation +with the vanilla server, but we do plan on implementing some sort of terrain generation as soon as we can. -## Docs +### Will there be plugins? And how? -Documentation for rewrite/v3 can be found at: +We do very much plan to have a plugin system and as of right now, our plan is to leverage the +JVM to allow for plugins to be written in Kotlin, Java, or any other JVM language. We are also considering other +languages +such as Rust, JavaScript and possibly other native languages, but that is a fair way off for now. -- Unsecure https://docs.ferrumc.com/ -- HTTPS https://docs.nimq.xyz/ +### What does 'FerrumC' mean? -## Funding / Donations +It's a play on words. Ferrum is the Latin word for iron and it ***rust***s. And MC (Minecraft) in the end. +So it becomes Ferru*mc*. Get it? 😄 -If you would like to donate to the development of FerrumC, you can do so via the following methods: +## 📜 License -- OpenCollective: https://opencollective.com/ferrumc +This project is licensed under the MIT License - see the [LICENSE.md](LICENSE) file for details. 
+## 🌟 Star History + + + + + Star History Chart + + \ No newline at end of file diff --git a/assets/README/ECSBlockDiagram.png b/assets/README/ECSBlockDiagram.png new file mode 100644 index 00000000..4d36afa1 Binary files /dev/null and b/assets/README/ECSBlockDiagram.png differ diff --git a/assets/README/chunk_importing.gif b/assets/README/chunk_importing.gif new file mode 100644 index 00000000..0fc45c82 Binary files /dev/null and b/assets/README/chunk_importing.gif differ diff --git a/assets/README/chunk_loading.gif b/assets/README/chunk_loading.gif new file mode 100644 index 00000000..b664e420 Binary files /dev/null and b/assets/README/chunk_loading.gif differ diff --git a/assets/README/config.toml.png b/assets/README/config.toml.png new file mode 100644 index 00000000..3ce58ebe Binary files /dev/null and b/assets/README/config.toml.png differ diff --git a/assets/README/configuration.png b/assets/README/configuration.png new file mode 100644 index 00000000..b8289d81 Binary files /dev/null and b/assets/README/configuration.png differ diff --git a/assets/README/download_prebuilt.gif b/assets/README/download_prebuilt.gif new file mode 100644 index 00000000..31261632 Binary files /dev/null and b/assets/README/download_prebuilt.gif differ diff --git a/assets/README/header.svg b/assets/README/header.svg new file mode 100644 index 00000000..98bd9343 --- /dev/null +++ b/assets/README/header.svg @@ -0,0 +1,93 @@ + + + + + + + + +
+ + +
+
+
+
+

FerrumC

+

A high-performance Minecraft server implementation, crafted in Rust for unparalleled speed and efficiency

+
+
+
+ + + + +
+ + \ No newline at end of file diff --git a/assets/README/in_game.png b/assets/README/in_game.png new file mode 100644 index 00000000..6fcbda20 Binary files /dev/null and b/assets/README/in_game.png differ diff --git a/assets/README/mem_use.png b/assets/README/mem_use.png new file mode 100644 index 00000000..40359339 Binary files /dev/null and b/assets/README/mem_use.png differ diff --git a/assets/README/mind boggling.gif b/assets/README/mind boggling.gif new file mode 100644 index 00000000..8202fb5d Binary files /dev/null and b/assets/README/mind boggling.gif differ diff --git a/assets/README/server list.png b/assets/README/server list.png new file mode 100644 index 00000000..93f37d7a Binary files /dev/null and b/assets/README/server list.png differ diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 5d56faf9..8e275b74 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,3 @@ [toolchain] channel = "nightly" +components = ["rustfmt", "clippy"] diff --git a/src/bin/src/errors.rs b/src/bin/src/errors.rs index 2f0f6aa6..c44ba2d1 100644 --- a/src/bin/src/errors.rs +++ b/src/bin/src/errors.rs @@ -37,12 +37,12 @@ pub enum BinaryError { #[allow(dead_code)] #[error("{0}")] Custom(String), - + #[error("Tokio Join Error")] TokioJoinError(#[from] tokio::task::JoinError), #[error("IO error: {0}")] Io(#[from] std::io::Error), - + #[error("Root Path error: {0}")] RootPath(#[from] ferrumc_general_purpose::paths::RootPathError), } diff --git a/src/bin/src/main.rs b/src/bin/src/main.rs index 5f083103..62fd09b4 100644 --- a/src/bin/src/main.rs +++ b/src/bin/src/main.rs @@ -21,6 +21,8 @@ struct CLIArgs { #[clap(long)] #[arg(value_enum, default_value_t = LogLevel(Level::TRACE))] log: LogLevel, + #[clap(long)] + setup: bool, } // Wrapper struct for the Level enum @@ -82,6 +84,18 @@ async fn entry(cli_args: CLIArgs) -> Result<()> { return Ok(()); } + if cli_args.setup { + return if let Err(e) = ferrumc_config::setup::setup() { + error!("Could not set up the 
server: {}", e.to_string()); + Err(BinaryError::Custom( + "Could not set up the server.".to_string(), + )) + } else { + info!("Server setup complete."); + Ok(()) + }; + } + let state = create_state().await?; let global_state = Arc::new(state); diff --git a/src/bin/src/packet_handlers/handshake.rs b/src/bin/src/packet_handlers/handshake.rs index a58d6af6..13fb967a 100644 --- a/src/bin/src/packet_handlers/handshake.rs +++ b/src/bin/src/packet_handlers/handshake.rs @@ -1,12 +1,12 @@ +use ferrumc_ecs::errors::ECSError; use ferrumc_macros::event_handler; use ferrumc_net::connection::ConnectionState; -use ferrumc_net::errors::NetError::{Packet}; +use ferrumc_net::errors::NetError::Packet; use ferrumc_net::errors::{NetError, PacketError}; use ferrumc_net::packets::incoming::handshake::HandshakeEvent; +use ferrumc_net::utils::ecs_helpers::EntityExt; use ferrumc_state::GlobalState; use tracing::{error, trace}; -use ferrumc_ecs::errors::ECSError; -use ferrumc_net::utils::ecs_helpers::EntityExt; #[event_handler] async fn handle_handshake( @@ -18,8 +18,7 @@ async fn handle_handshake( // set connection state to handshake let entity = handshake_event.conn_id; - let Ok(mut connection_state) = entity - .get_mut::(&state) else { + let Ok(mut connection_state) = entity.get_mut::(&state) else { error!("Failed to get connection state"); return Err(NetError::ECSError(ECSError::ComponentNotFound)); }; diff --git a/src/bin/src/packet_handlers/login_process.rs b/src/bin/src/packet_handlers/login_process.rs index d96235b2..c26609aa 100644 --- a/src/bin/src/packet_handlers/login_process.rs +++ b/src/bin/src/packet_handlers/login_process.rs @@ -1,4 +1,7 @@ use ferrumc_core::identity::player_identity::PlayerIdentity; +use ferrumc_core::transform::grounded::OnGround; +use ferrumc_core::transform::position::Position; +use ferrumc_core::transform::rotation::Rotation; use ferrumc_ecs::components::storage::ComponentRefMut; use ferrumc_macros::event_handler; use 
ferrumc_net::connection::{ConnectionState, StreamWriter}; @@ -14,17 +17,14 @@ use ferrumc_net::packets::outgoing::game_event::GameEventPacket; use ferrumc_net::packets::outgoing::keep_alive::OutgoingKeepAlivePacket; use ferrumc_net::packets::outgoing::login_play::LoginPlayPacket; use ferrumc_net::packets::outgoing::login_success::LoginSuccessPacket; -use ferrumc_net::packets::outgoing::set_center_chunk::SetCenterChunk; -use ferrumc_net::packets::outgoing::set_render_distance::SetRenderDistance; use ferrumc_net::packets::outgoing::registry_data::get_registry_packets; +use ferrumc_net::packets::outgoing::set_center_chunk::SetCenterChunk; use ferrumc_net::packets::outgoing::set_default_spawn_position::SetDefaultSpawnPositionPacket; +use ferrumc_net::packets::outgoing::set_render_distance::SetRenderDistance; use ferrumc_net::packets::outgoing::synchronize_player_position::SynchronizePlayerPositionPacket; -use ferrumc_state::GlobalState; use ferrumc_net_codec::encode::NetEncodeOpts; +use ferrumc_state::GlobalState; use tracing::{debug, trace}; -use ferrumc_core::transform::grounded::OnGround; -use ferrumc_core::transform::position::Position; -use ferrumc_core::transform::rotation::Rotation; #[event_handler] async fn handle_login_start( @@ -124,14 +124,14 @@ async fn handle_ack_finish_configuration( let mut conn_state = state.universe.get_mut::(conn_id)?; *conn_state = ConnectionState::Play; - + // add components to the entity after the connection state has been set to play. // to avoid wasting resources on entities that are fetching stuff like server status etc. - state.universe + state + .universe .add_component::(conn_id, Position::default())? .add_component::(conn_id, Rotation::default())? 
.add_component::(conn_id, OnGround::default())?; - let mut writer = state.universe.get_mut::(conn_id)?; diff --git a/src/bin/src/packet_handlers/mod.rs b/src/bin/src/packet_handlers/mod.rs index 5e515d7c..32983a54 100644 --- a/src/bin/src/packet_handlers/mod.rs +++ b/src/bin/src/packet_handlers/mod.rs @@ -1,4 +1,4 @@ mod handshake; mod login_process; +mod tick_handler; mod transform; -mod tick_handler; \ No newline at end of file diff --git a/src/bin/src/systems/definition.rs b/src/bin/src/systems/definition.rs index e54fe459..9873becb 100644 --- a/src/bin/src/systems/definition.rs +++ b/src/bin/src/systems/definition.rs @@ -1,13 +1,13 @@ +use crate::systems::chunk_sender::ChunkSenderSystem; use crate::systems::keep_alive_system::KeepAliveSystem; use crate::systems::tcp_listener_system::TcpListenerSystem; use crate::systems::ticking_system::TickingSystem; use async_trait::async_trait; -use ferrumc_net::{NetResult}; +use ferrumc_net::NetResult; use ferrumc_state::GlobalState; use futures::stream::FuturesUnordered; use std::sync::{Arc, LazyLock}; use tracing::{debug, debug_span, info, Instrument}; -use crate::systems::chunk_sender::ChunkSenderSystem; #[async_trait] pub trait System: Send + Sync { @@ -17,15 +17,13 @@ pub trait System: Send + Sync { fn name(&self) -> &'static str; } -static SYSTEMS: LazyLock>> = LazyLock::new(|| { - create_systems() -}); +static SYSTEMS: LazyLock>> = LazyLock::new(create_systems); pub fn create_systems() -> Vec> { vec![ Arc::new(TcpListenerSystem), Arc::new(KeepAliveSystem::new()), Arc::new(TickingSystem), - Arc::new(ChunkSenderSystem::new()) + Arc::new(ChunkSenderSystem::new()), ] } pub async fn start_all_systems(state: GlobalState) -> NetResult<()> { diff --git a/src/bin/src/systems/mod.rs b/src/bin/src/systems/mod.rs index dda72719..06228141 100644 --- a/src/bin/src/systems/mod.rs +++ b/src/bin/src/systems/mod.rs @@ -1,6 +1,6 @@ pub(crate) mod definition; -mod tcp_listener_system; +mod chunk_sender; mod keep_alive_system; +mod 
tcp_listener_system; mod ticking_system; -mod chunk_sender; \ No newline at end of file diff --git a/src/bin/src/systems/tcp_listener_system.rs b/src/bin/src/systems/tcp_listener_system.rs index 8898f7a8..d230d5f5 100644 --- a/src/bin/src/systems/tcp_listener_system.rs +++ b/src/bin/src/systems/tcp_listener_system.rs @@ -1,10 +1,10 @@ -use std::sync::Arc; +use crate::systems::definition::System; +use crate::Result; use async_trait::async_trait; -use tracing::{debug, error, info, info_span, Instrument}; use ferrumc_net::connection::handle_connection; use ferrumc_state::GlobalState; -use crate::systems::definition::System; -use crate::Result; +use std::sync::Arc; +use tracing::{debug, error, info, info_span, Instrument}; pub struct TcpListenerSystem; @@ -29,18 +29,18 @@ impl TcpListenerSystem { async fn initiate_loop(state: GlobalState) -> Result<()> { let tcp_listener = &state.tcp_listener; info!("Server is listening on [{}]", tcp_listener.local_addr()?); - + loop { debug!("Accepting connection"); let (stream, _) = tcp_listener.accept().await?; let addy = stream.peer_addr()?; tokio::task::spawn( handle_connection(Arc::clone(&state), stream) - .instrument(info_span!("conn", %addy).or_current()) + .instrument(info_span!("conn", %addy).or_current()), ); } - + #[allow(unreachable_code)] Ok(()) } -} \ No newline at end of file +} diff --git a/src/lib/README.MD b/src/lib/README.MD deleted file mode 100644 index 786e01eb..00000000 --- a/src/lib/README.MD +++ /dev/null @@ -1,48 +0,0 @@ -# FerrumC source code - -Welcome to FerrumC project source code. - -The FerrumC project source code is organized as follows: - -## Code source layout - -FerrumC will define the following primary crates inside its source code: -- `ferrumc-core` -- `ferrumc-ecs` -- `ferrumc-events` -- `ferrumc-net` -- `ferrumc-storage` -- `ferrumc-utils` -- `ferrumc-world` -- `ferrumc-plugins` - -Each crate may contain subcrates within its directory to implement essential components. 
For example: -```md -- core/ # ferrumc-core -- net/ # ferrumc-net - - encode # ferrumc-net-encode -``` - -## Dependencies - -Dependencies are defined in the workspace Cargo.toml file and imported into each crate using the following syntax: -```toml -crate = { workspace = true } -``` - -Workspace members dependencies are defined as follows: -```toml -member = { workspace = true } -``` - -## Clippy - -FerrumC enforce the usage of clippy and this source code should not contains any clippy warning. - -## Components - -Any system within FerrumC is built with the ability to be shutdown at some point in time. - -## Tests - -Tests are located within their respective crates and do not have a separate tests/ folder. diff --git a/src/lib/adapters/anvil/benches/anvil.rs b/src/lib/adapters/anvil/benches/anvil.rs index 2f9d41b7..ee591afa 100644 --- a/src/lib/adapters/anvil/benches/anvil.rs +++ b/src/lib/adapters/anvil/benches/anvil.rs @@ -1,14 +1,14 @@ -use std::fs::File; -use std::path::PathBuf; use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use ferrumc_utils::root; -use rayon::prelude::*; use fastanvil::Region; use ferrumc_anvil::load_anvil_file; +use ferrumc_utils::root; +use rayon::prelude::*; +use std::fs::File; +use std::path::PathBuf; fn criterion_benchmark(c: &mut Criterion) { let mut read_all_group = c.benchmark_group("Read All"); - + read_all_group.bench_function("FerrumC Rayon", |b| { b.iter(|| { let file_path = PathBuf::from(root!(".etc/r.0.0.mca")); @@ -21,7 +21,7 @@ fn criterion_benchmark(c: &mut Criterion) { }); }); }); - + read_all_group.bench_function("FerrumC", |b| { b.iter(|| { let file_path = PathBuf::from(root!(".etc/r.0.0.mca")); @@ -32,7 +32,7 @@ fn criterion_benchmark(c: &mut Criterion) { }); }); }); - + read_all_group.bench_function("FastAnvil", |b| { b.iter(|| { let file = File::open(root!(".etc/r.0.0.mca")).unwrap(); @@ -42,11 +42,11 @@ fn criterion_benchmark(c: &mut Criterion) { }); }); }); - + read_all_group.finish(); - + 
let mut read_one_group = c.benchmark_group("Read One"); - + read_one_group.bench_function("FerrumC", |b| { b.iter(|| { let file_path = PathBuf::from(root!(".etc/r.0.0.mca")); @@ -54,7 +54,7 @@ fn criterion_benchmark(c: &mut Criterion) { black_box(loaded_file.get_chunk(0, 0)); }); }); - + read_one_group.bench_function("FastAnvil", |b| { b.iter(|| { let file = File::open(root!(".etc/r.0.0.mca")).unwrap(); @@ -62,11 +62,9 @@ fn criterion_benchmark(c: &mut Criterion) { black_box(region.read_chunk(0, 0).unwrap()); }); }); - + read_one_group.finish(); } - - criterion_group!(benches, criterion_benchmark); -criterion_main!(benches); \ No newline at end of file +criterion_main!(benches); diff --git a/src/lib/adapters/anvil/src/lib.rs b/src/lib/adapters/anvil/src/lib.rs index f0cc6a65..b91dd929 100644 --- a/src/lib/adapters/anvil/src/lib.rs +++ b/src/lib/adapters/anvil/src/lib.rs @@ -1,11 +1,11 @@ pub mod errors; +use crate::errors::AnvilError; +use memmap2::Mmap; use std::io::Read; use std::path::PathBuf; -use memmap2::Mmap; use tracing::error; use yazi::Adler32; -use crate::errors::AnvilError; pub struct LoadedAnvilFile { pub table: [u8; 4096], @@ -52,7 +52,6 @@ pub fn get_chunk(x: u32, z: u32, file_path: PathBuf) -> Option> { /// ``` #[allow(unsafe_code)] pub fn load_anvil_file(file_path: PathBuf) -> Result { - // Check if the file exists if !file_path.exists() { return Err(AnvilError::FileNotFound(file_path)); @@ -70,13 +69,10 @@ pub fn load_anvil_file(file_path: PathBuf) -> Result Result { let out = yazi::decompress(chunk_compressed_data, yazi::Format::Zlib).ok(); match out { - Some(data) => { - match data.1 { - Some(checksum) => { - if Adler32::from_buf(&data.0).finish() == checksum { - Some(data.0) - } else { - error!("Checksum does not match"); - None - } - } - None => { - error!("Failed to decompress Zlib data (No checksum)"); + Some(data) => match data.1 { + Some(checksum) => { + if Adler32::from_buf(&data.0).finish() == checksum { + Some(data.0) + } else { + 
error!("Checksum does not match"); None } } - } + None => { + error!("Failed to decompress Zlib data (No checksum)"); + None + } + }, None => { error!("Failed to decompress Zlib data"); None @@ -222,20 +214,20 @@ impl LoadedAnvilFile { u32::from(self.table[base_index + 2]), u32::from(self.table[base_index + 3]), ]; - let location = (chunk_data[0] << 24) | (chunk_data[1] << 16) | (chunk_data[2] << 8) | chunk_data[3]; + let location = + (chunk_data[0] << 24) | (chunk_data[1] << 16) | (chunk_data[2] << 8) | chunk_data[3]; self.get_chunk_from_location(location) } } - #[cfg(test)] mod tests { - use std::fs::File; - use std::io::Read; - use fastanvil::Region; use super::*; + use fastanvil::Region; use ferrumc_utils::root; use rayon::prelude::*; + use std::fs::File; + use std::io::Read; #[test] fn test_load_anvil_file() { @@ -269,7 +261,10 @@ mod tests { let file_path = PathBuf::from(root!(".etc/r.0.0.mca")); let loaded_file = load_anvil_file(file_path).unwrap(); let chunk = loaded_file.get_chunk(0, 0); - let fast_chunk = Region::from_stream(File::open(root!(".etc/r.0.0.mca")).unwrap()).unwrap().read_chunk(0, 0).unwrap(); + let fast_chunk = Region::from_stream(File::open(root!(".etc/r.0.0.mca")).unwrap()) + .unwrap() + .read_chunk(0, 0) + .unwrap(); assert!(chunk.is_some()); assert!(fast_chunk.is_some()); assert_eq!(chunk.clone().unwrap(), fast_chunk.unwrap()); @@ -286,4 +281,4 @@ mod tests { }); }); } -} \ No newline at end of file +} diff --git a/src/lib/adapters/mca/Cargo.toml b/src/lib/adapters/mca/Cargo.toml deleted file mode 100644 index 7b5c6bf2..00000000 --- a/src/lib/adapters/mca/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "ferrumc-mca" -version = "0.1.0" -edition = "2021" - -[dependencies] -ferrumc-nbt = { workspace = true } - -[lints] -workspace = true diff --git a/src/lib/adapters/mca/src/lib.rs b/src/lib/adapters/mca/src/lib.rs deleted file mode 100644 index 13419b4f..00000000 --- a/src/lib/adapters/mca/src/lib.rs +++ /dev/null @@ -1,39 +0,0 
@@ -#[allow(unused)] -fn coming_soon() { - unimplemented!(" :) ") -} - -#[cfg(test)] -#[test] -fn test() { - let some_test_nbt: [u8; 40] = [ - 10, 0, 2, b'H', b'i', // compound: (2) "Hi" - 8, 0, 3, b'I', b'\'', b'm', 0, 7, b'f', b'e', b'r', b'r', b'u', b'm', - b'c', // string: (3) "I'm" (7) "ferrumc" - 10, 0, 3, b'Y', b'o', b'u', // compound: (3) "You" - 8, 0, 3, b'I', b'\'', b'm', 0, 4, b'c', b'o', b'o', - b'l', // string: (3) "I'm" (4) "cool" - 0, 0, // End tag - ]; - - let mut nbt = ferrumc_nbt::de::borrow::NbtTape::new(&some_test_nbt); - nbt.parse(); - - println!("{:?}", nbt.root); - - /*let mut nbt = ferrumc_nbt::NbtParser::new(&some_test_nbt); - let tapes = nbt.parse().unwrap(); - - let viewer = ferrumc_nbt::NbtTokenView::new(tapes, 0); - - let compound = viewer.as_compound().expect("Expected a compound"); - - let hi = compound.get("I'm").expect("Expected a key named 'Hi'"); - - let value = hi.value().unwrap(); - if let ferrumc_nbt::NbtToken::String(value) = value { - assert_eq!(*value, "ferrumc") - } else { - panic!("Expected a string") - }*/ -} diff --git a/src/lib/adapters/nbt/src/de/borrow.rs b/src/lib/adapters/nbt/src/de/borrow.rs index 03ecb8c3..aba3f798 100644 --- a/src/lib/adapters/nbt/src/de/borrow.rs +++ b/src/lib/adapters/nbt/src/de/borrow.rs @@ -1,8 +1,8 @@ use crate::de::converter::FromNbt; -use ferrumc_net_codec::encode::{NetEncode, NetEncodeOpts, NetEncodeResult}; -use std::io::Write; use crate::{NBTSerializable, NBTSerializeOptions}; use ferrumc_general_purpose::simd::arrays; +use ferrumc_net_codec::encode::{NetEncode, NetEncodeOpts, NetEncodeResult}; +use std::io::Write; #[repr(u8)] #[derive(Debug, PartialEq, Clone)] @@ -620,14 +620,18 @@ impl NetEncode for NbtTape<'_> { } } - impl NbtTapeElement<'_> { - pub fn serialize_as_network(&self, tape: &mut NbtTape, writer: &mut Vec, opts: &NBTSerializeOptions) -> NetEncodeResult<()> { + pub fn serialize_as_network( + &self, + tape: &mut NbtTape, + writer: &mut Vec, + opts: &NBTSerializeOptions, + ) 
-> NetEncodeResult<()> { /*if let NBTSerializeOptions::WithHeader(name) = opts { writer.write_all(&[self.nbt_id()])?; name.serialize(writer, &NBTSerializeOptions::None); }*/ - + match opts { NBTSerializeOptions::None => {} NBTSerializeOptions::WithHeader(name) => { @@ -638,8 +642,7 @@ impl NbtTapeElement<'_> { writer.write_all(&[self.nbt_id()])?; } } - - + match self { NbtTapeElement::End => Ok(()), NbtTapeElement::Byte(val) => { @@ -736,18 +739,14 @@ impl NbtTapeElement<'_> { } NbtTapeElement::IntArray(data) => { (data.len() as i32).serialize(writer, &NBTSerializeOptions::None); - let data = unsafe { - std::mem::transmute::<&[i32], &[u32]>(data.as_slice()) - }; + let data = unsafe { std::mem::transmute::<&[i32], &[u32]>(data.as_slice()) }; let data = arrays::u32_slice_to_u8_be(data); writer.write_all(data.as_slice())?; Ok(()) } NbtTapeElement::LongArray(data) => { (data.len() as i32).serialize(writer, &NBTSerializeOptions::None); - let data = unsafe { - std::mem::transmute::<&[i64], &[u64]>(data.as_slice()) - }; + let data = unsafe { std::mem::transmute::<&[i64], &[u64]>(data.as_slice()) }; let data = arrays::u64_slice_to_u8_be(data); writer.write_all(data.as_slice())?; Ok(()) diff --git a/src/lib/adapters/nbt/src/de/converter.rs b/src/lib/adapters/nbt/src/de/converter.rs index efebf147..c3b6393a 100644 --- a/src/lib/adapters/nbt/src/de/converter.rs +++ b/src/lib/adapters/nbt/src/de/converter.rs @@ -99,10 +99,9 @@ mod primitives { } } - mod maps { - use std::collections::{BTreeMap, HashMap}; use crate::{FromNbt, NBTError, NbtTape, NbtTapeElement, Result}; + use std::collections::{BTreeMap, HashMap}; impl<'a, V: FromNbt<'a>> FromNbt<'a> for HashMap { fn from_nbt(tapes: &NbtTape<'a>, element: &NbtTapeElement<'a>) -> Result { @@ -146,7 +145,7 @@ mod maps { .collect() } } - + impl<'a, V: FromNbt<'a>> FromNbt<'a> for BTreeMap { fn from_nbt(tapes: &NbtTape<'a>, element: &NbtTapeElement<'a>) -> Result { let compound = element.as_compound().ok_or(NBTError::TypeMismatch { 
@@ -189,7 +188,7 @@ mod test_map { assert_eq!(some_hashmap, hashmap); } - + #[test] fn test_btreemap_both_ways() { let some_btreemap = maplit::btreemap! { @@ -211,4 +210,4 @@ mod test_map { assert_eq!(some_btreemap, btreemap); } -} \ No newline at end of file +} diff --git a/src/lib/adapters/nbt/src/ser/impl.rs b/src/lib/adapters/nbt/src/ser/impl.rs index e5087c93..807a7c71 100644 --- a/src/lib/adapters/nbt/src/ser/impl.rs +++ b/src/lib/adapters/nbt/src/ser/impl.rs @@ -1,6 +1,6 @@ -use std::collections::HashMap; -use ferrumc_general_purpose::simd::arrays; use super::{NBTSerializable, NBTSerializeOptions}; +use ferrumc_general_purpose::simd::arrays; +use std::collections::HashMap; use uuid::Uuid; macro_rules! impl_ser_primitives { @@ -184,7 +184,6 @@ impl NBTSerializable for Option { } } - /// Serialize multiple values to a buffer. /// Order: buf, options, values... macro_rules! ser { @@ -197,9 +196,9 @@ macro_rules! ser { } mod hashmaps { - use std::collections::BTreeMap; use super::*; use crate::ser::NBTSerializeOptions; + use std::collections::BTreeMap; impl NBTSerializable for HashMap { //! Equivalent to a COMPOUND tag in NBT. 
fn serialize(&self, buf: &mut Vec, options: &NBTSerializeOptions<'_>) { @@ -242,7 +241,6 @@ mod hashmaps { } } - impl NBTSerializable for BTreeMap<&str, V> { fn serialize(&self, buf: &mut Vec, options: &NBTSerializeOptions<'_>) { write_header::(buf, options); @@ -263,7 +261,7 @@ mod hashmaps { TAG_COMPOUND } } - + impl NBTSerializable for BTreeMap { fn serialize(&self, buf: &mut Vec, options: &NBTSerializeOptions<'_>) { write_header::(buf, options); diff --git a/src/lib/core/src/identity/mod.rs b/src/lib/core/src/identity/mod.rs index 25d892f9..52fc93e0 100644 --- a/src/lib/core/src/identity/mod.rs +++ b/src/lib/core/src/identity/mod.rs @@ -1 +1 @@ -pub mod player_identity; \ No newline at end of file +pub mod player_identity; diff --git a/src/lib/core/src/identity/player_identity.rs b/src/lib/core/src/identity/player_identity.rs index 3068634c..a8b33e9d 100644 --- a/src/lib/core/src/identity/player_identity.rs +++ b/src/lib/core/src/identity/player_identity.rs @@ -8,4 +8,4 @@ impl PlayerIdentity { pub fn new(username: String, uuid: u128) -> Self { Self { username, uuid } } -} \ No newline at end of file +} diff --git a/src/lib/core/src/lib.rs b/src/lib/core/src/lib.rs index 09a67c3b..dc9eeb0e 100644 --- a/src/lib/core/src/lib.rs +++ b/src/lib/core/src/lib.rs @@ -1,7 +1,6 @@ pub mod errors; - // Core structs/types. Usually used in ECS Components. 
-pub mod transform; pub mod identity; -pub mod state; \ No newline at end of file +pub mod state; +pub mod transform; diff --git a/src/lib/core/src/state.rs b/src/lib/core/src/state.rs index e69de29b..8b137891 100644 --- a/src/lib/core/src/state.rs +++ b/src/lib/core/src/state.rs @@ -0,0 +1 @@ + diff --git a/src/lib/core/src/transform/grounded.rs b/src/lib/core/src/transform/grounded.rs index 228d2daf..1a0f9253 100644 --- a/src/lib/core/src/transform/grounded.rs +++ b/src/lib/core/src/transform/grounded.rs @@ -1,2 +1,2 @@ #[derive(Debug, Default)] -pub struct OnGround(pub bool); \ No newline at end of file +pub struct OnGround(pub bool); diff --git a/src/lib/core/src/transform/mod.rs b/src/lib/core/src/transform/mod.rs index 6407fea2..effd8b99 100644 --- a/src/lib/core/src/transform/mod.rs +++ b/src/lib/core/src/transform/mod.rs @@ -1,4 +1,3 @@ - +pub mod grounded; pub mod position; pub mod rotation; -pub mod grounded; \ No newline at end of file diff --git a/src/lib/core/src/transform/position.rs b/src/lib/core/src/transform/position.rs index 5a6a96e5..3b1f0fdc 100644 --- a/src/lib/core/src/transform/position.rs +++ b/src/lib/core/src/transform/position.rs @@ -3,7 +3,7 @@ use std::fmt::{Debug, Display, Formatter}; pub struct Position { pub x: f64, pub y: f64, - pub z: f64 + pub z: f64, } // Helper functions: @@ -34,7 +34,11 @@ impl From<&(f64, f64, f64)> for Position { impl Debug for Position { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Position {{ x: {:.2}, y: {:.2}, z: {:.2} }}", self.x, self.y, self.z) + write!( + f, + "Position {{ x: {:.2}, y: {:.2}, z: {:.2} }}", + self.x, self.y, self.z + ) } } @@ -42,4 +46,4 @@ impl Display for Position { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "({:.2}, {:.2}, {:.2})", self.x, self.y, self.z) } -} \ No newline at end of file +} diff --git a/src/lib/derive_macros/src/events/mod.rs b/src/lib/derive_macros/src/events/mod.rs index d46d9caf..ac4e0f95 100644 --- 
a/src/lib/derive_macros/src/events/mod.rs +++ b/src/lib/derive_macros/src/events/mod.rs @@ -140,4 +140,4 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { }; output.into() -} \ No newline at end of file +} diff --git a/src/lib/derive_macros/src/helpers.rs b/src/lib/derive_macros/src/helpers.rs index 9c1da4fc..453a87ad 100644 --- a/src/lib/derive_macros/src/helpers.rs +++ b/src/lib/derive_macros/src/helpers.rs @@ -59,7 +59,6 @@ pub(crate) fn is_field_type_optional(field: &syn::Field) -> bool { .any(|segment| segment.ident.to_string().to_lowercase() == "option") } - pub struct StructInfo<'a> { pub struct_name: &'a syn::Ident, pub impl_generics: syn::ImplGenerics<'a>, @@ -70,7 +69,10 @@ pub struct StructInfo<'a> { pub force_created: bool, } -pub(crate) fn extract_struct_info<'a>(input: &'a DeriveInput, default_lifetime: Option<&str>) -> StructInfo<'a> { +pub(crate) fn extract_struct_info<'a>( + input: &'a DeriveInput, + default_lifetime: Option<&str>, +) -> StructInfo<'a> { let struct_name = &input.ident; let impl_generics = input.generics.clone(); let ty_generics = input.generics.split_for_impl().1; @@ -86,13 +88,10 @@ pub(crate) fn extract_struct_info<'a>(input: &'a DeriveInput, default_lifetime: ); }*/ - let lifetime = impl_generics - .params - .iter() - .find_map(|param| match param { - GenericParam::Lifetime(lifetime) => Some(lifetime.lifetime.clone()), - _ => None, - }); + let lifetime = impl_generics.params.iter().find_map(|param| match param { + GenericParam::Lifetime(lifetime) => Some(lifetime.lifetime.clone()), + _ => None, + }); let mut force_created = false; let lifetime = match lifetime { @@ -100,7 +99,8 @@ pub(crate) fn extract_struct_info<'a>(input: &'a DeriveInput, default_lifetime: None => { if let Some(default_lifetime) = default_lifetime { force_created = true; - let default_lifetime = syn::Lifetime::new(default_lifetime, proc_macro2::Span::call_site()); + let default_lifetime = + syn::Lifetime::new(default_lifetime, 
proc_macro2::Span::call_site()); quote! { #default_lifetime } @@ -123,10 +123,15 @@ pub(crate) fn extract_struct_info<'a>(input: &'a DeriveInput, default_lifetime: } else { quote! { <#lifetime> } }, - force_created + force_created, } } pub(crate) fn get_derive_attributes(input: &DeriveInput, path_name: &str) -> Vec { - input.attrs.iter().filter(|attr| attr.path().is_ident(path_name)).cloned().collect() -} \ No newline at end of file + input + .attrs + .iter() + .filter(|attr| attr.path().is_ident(path_name)) + .cloned() + .collect() +} diff --git a/src/lib/derive_macros/src/lib.rs b/src/lib/derive_macros/src/lib.rs index 90d23b32..06fd84f8 100644 --- a/src/lib/derive_macros/src/lib.rs +++ b/src/lib/derive_macros/src/lib.rs @@ -5,22 +5,16 @@ use proc_macro::TokenStream; mod events; mod helpers; mod nbt; -mod profiling; mod net; +mod profiling; #[proc_macro_attribute] -pub fn profile( - attr: TokenStream, - item: TokenStream, -) -> TokenStream { +pub fn profile(attr: TokenStream, item: TokenStream) -> TokenStream { profiling::profile_fn(attr, item) } #[proc_macro_attribute] -pub fn event_handler( - attr: TokenStream, - item: TokenStream, -) -> TokenStream { +pub fn event_handler(attr: TokenStream, item: TokenStream) -> TokenStream { events::event_handler_fn(attr, item) } @@ -59,4 +53,4 @@ pub fn packet(args: TokenStream, input: TokenStream) -> TokenStream { pub fn bake_packet_registry(input: TokenStream) -> TokenStream { net::packets::bake_registry(input) } -// #=================== PACKETS ===================# \ No newline at end of file +// #=================== PACKETS ===================# diff --git a/src/lib/derive_macros/src/nbt/de.rs b/src/lib/derive_macros/src/nbt/de.rs index 8940b30c..ab31e99b 100644 --- a/src/lib/derive_macros/src/nbt/de.rs +++ b/src/lib/derive_macros/src/nbt/de.rs @@ -1,8 +1,8 @@ +use crate::helpers::StructInfo; use crate::nbt::helpers::NbtFieldAttribute; use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, 
DeriveInput}; -use crate::helpers::StructInfo; pub fn derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); @@ -43,7 +43,7 @@ pub fn derive(input: TokenStream) -> TokenStream { where_clause, lifetime, lifetime_without_ident, - force_created + force_created, } = crate::helpers::extract_struct_info(&input, Some("'de")); let fields = crate::helpers::get_fields(&input); @@ -98,13 +98,13 @@ pub fn derive(input: TokenStream) -> TokenStream { )?, } }); - + let impl_generics = if force_created { quote! { <'de> #impl_generics } } else { quote! { #impl_generics } }; - + let expanded = quote! { impl #impl_generics ::ferrumc_nbt::FromNbt #lifetime for #struct_name #ty_generics #where_clause { fn from_nbt( diff --git a/src/lib/derive_macros/src/nbt/helpers.rs b/src/lib/derive_macros/src/nbt/helpers.rs index f9a7794d..812213b6 100644 --- a/src/lib/derive_macros/src/nbt/helpers.rs +++ b/src/lib/derive_macros/src/nbt/helpers.rs @@ -1,5 +1,5 @@ use crate::helpers::is_field_type_optional; -use syn::{Variant, Field, LitStr, LitInt, Meta, DeriveInput}; +use syn::{DeriveInput, Field, LitInt, LitStr, Meta, Variant}; #[derive(Debug, Clone)] pub enum Cases { @@ -30,14 +30,14 @@ impl Cases { } } snake_case - }, + } Self::CamelCase => { let mut camel_case = String::with_capacity(str.len()); let mut next_word = false; for c in str.chars() { if c == '_' { next_word = true; - } else if next_word { + } else if next_word { camel_case.extend(c.to_uppercase()); next_word = false; } else { @@ -45,7 +45,7 @@ impl Cases { } } camel_case - }, + } } } } @@ -108,23 +108,13 @@ impl NbtFieldAttribute { match name.to_string().as_str() { "tag" => { - let tag = nested_meta - .value() - .expect("Expected tag to have a value"); - let tag = tag - .parse::() - .expect("Expected tag to be a string"); - attributes.push(NbtFieldAttribute::Tag { - tag: tag.value(), - }); + let tag = nested_meta.value().expect("Expected tag to have a value"); + let tag = 
tag.parse::().expect("Expected tag to be a string"); + attributes.push(NbtFieldAttribute::Tag { tag: tag.value() }); } "tag_type" => { - let tag = nested_meta - .value() - .expect("Expected tag to have a value"); - let tag = tag - .parse::() - .expect("Expected tag to be a string"); + let tag = nested_meta.value().expect("Expected tag to have a value"); + let tag = tag.parse::().expect("Expected tag to be a string"); attributes.push(NbtFieldAttribute::TagType { tag: tag.base10_parse::().expect("Not a valid u8"), }); @@ -141,22 +131,22 @@ impl NbtFieldAttribute { }); } "rename_all" => { - let case = nested_meta - .value() - .expect("Expected case to have a value"); + let case = nested_meta.value().expect("Expected case to have a value"); let case: Cases = case .parse::() - .expect("Expected case to be a string").value().into(); - attributes.push(NbtFieldAttribute::RenameAll { - case - }); + .expect("Expected case to be a string") + .value() + .into(); + attributes.push(NbtFieldAttribute::RenameAll { case }); } _ => {} } Ok(()) }) - .unwrap_or_else(|_| println!("[WARN] Failed to parse nested meta parsing input attributes")); + .unwrap_or_else(|_| { + println!("[WARN] Failed to parse nested meta parsing input attributes") + }); } attributes diff --git a/src/lib/derive_macros/src/nbt/ser.rs b/src/lib/derive_macros/src/nbt/ser.rs index 72bd4419..08d6bee6 100644 --- a/src/lib/derive_macros/src/nbt/ser.rs +++ b/src/lib/derive_macros/src/nbt/ser.rs @@ -1,9 +1,9 @@ -use crate::nbt::helpers::{NbtFieldAttribute, Cases}; +use crate::nbt::helpers::{Cases, NbtFieldAttribute}; use proc_macro::TokenStream; use proc_macro2::Span; use quote::quote; use syn::spanned::Spanned; -use syn::{Data, Fields, Expr, LitStr}; +use syn::{Data, Expr, Fields, LitStr}; pub fn derive(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as syn::DeriveInput); @@ -27,13 +27,14 @@ pub fn derive(input: TokenStream) -> TokenStream { match attr { NbtFieldAttribute::RenameAll { 
case } => { variant_case = case.clone(); - }, - NbtFieldAttribute::TagType { tag } => { tag_type = *tag; }, + } + NbtFieldAttribute::TagType { tag } => { + tag_type = *tag; + } _ => {} } } - let fields = fields.iter().enumerate().map(|(i, field)| { let ident = format!("_{}", i); let ident = syn::Ident::new(&ident, field.span()); @@ -303,15 +304,15 @@ pub fn derive(input: TokenStream) -> TokenStream { writer } } - + impl #impl_generics #name #ty_generics #where_clause { pub fn serialize_as_network(&self) -> Vec { let mut writer = Vec::new(); - + <#name #ty_generics as ::ferrumc_nbt::NBTSerializable>::serialize(self, &mut writer, &::ferrumc_nbt::NBTSerializeOptions::Network); /*::serialize(&::id(), &mut writer, &::ferrumc_nbt::NBTSerializeOptions::None); <#name #ty_generics as ::ferrumc_nbt::NBTSerializable>::serialize(self, &mut writer, &::ferrumc_nbt::NBTSerializeOptions::None);*/ - + writer } } diff --git a/src/lib/derive_macros/src/net/decode.rs b/src/lib/derive_macros/src/net/decode.rs index bb3c9f71..5a964eb9 100644 --- a/src/lib/derive_macros/src/net/decode.rs +++ b/src/lib/derive_macros/src/net/decode.rs @@ -20,7 +20,8 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { repr_type = Some(ident.to_string()); Ok(()) - }).unwrap(); + }) + .unwrap(); }); repr_type.map(|val| syn::parse_str::(&val).expect("Failed to parse repr type")) @@ -55,18 +56,19 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { } Ok(()) - }).unwrap(); - + }) + .unwrap(); }); (type_cast, type_cast_handler) }; - // So for enums we can simply read the type and then cast it directly. if let Some(type_cast) = type_cast { let Some(repr_attr) = repr_attr else { - panic!("NetDecode with type_cast enabled requires a repr attribute. Example: #[repr(u8)]"); + panic!( + "NetDecode with type_cast enabled requires a repr attribute. 
Example: #[repr(u8)]" + ); }; // in netdecode, read a type of type_cast and then if type_cast_handler exists, use it to do `type_cast_handler(type_cast)` @@ -87,28 +89,32 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { quote! { value } } Some(handler) => { - let handler = syn::parse_str::(&handler).expect("Failed to parse type_cast_handler"); + let handler = syn::parse_str::(&handler) + .expect("Failed to parse type_cast_handler"); quote! { #handler } } }; let enum_arms = if let syn::Data::Enum(data) = &input.data { let mut next_discriminant = 0; - data.variants.iter().map(|variant| { - let variant_name = &variant.ident; - let discriminant = if let Some((_, expr)) = &variant.discriminant { - // Use the explicit discriminant - quote! { #expr } - } else { - // Use the next implicit discriminant - let disc = quote! { #next_discriminant }; - next_discriminant += 1; - disc - }; - quote! { - #discriminant => Ok(#name::#variant_name), - } - }).collect::>() + data.variants + .iter() + .map(|variant| { + let variant_name = &variant.ident; + let discriminant = if let Some((_, expr)) = &variant.discriminant { + // Use the explicit discriminant + quote! { #expr } + } else { + // Use the next implicit discriminant + let disc = quote! { #next_discriminant }; + next_discriminant += 1; + disc + }; + quote! 
{ + #discriminant => Ok(#name::#variant_name), + } + }) + .collect::>() } else { panic!("NetDecode with type_cast enabled can only be derived for enums."); }; @@ -165,4 +171,4 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { }; TokenStream::from(expanded) -} \ No newline at end of file +} diff --git a/src/lib/derive_macros/src/net/encode.rs b/src/lib/derive_macros/src/net/encode.rs index 4622dd17..afb15d4e 100644 --- a/src/lib/derive_macros/src/net/encode.rs +++ b/src/lib/derive_macros/src/net/encode.rs @@ -1,7 +1,7 @@ use crate::helpers::{get_derive_attributes, StructInfo}; use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, Attribute, DeriveInput, LitInt, Fields}; +use syn::{parse_macro_input, Attribute, DeriveInput, Fields, LitInt}; // Helper function to extract packet ID from attributes fn extract_packet_id(packet_attr: Vec) -> Option { @@ -18,13 +18,16 @@ fn extract_packet_id(packet_attr: Vec) -> Option { packet_id = Some(value.base10_parse::().expect("base10_parse failed")); } Ok(()) - }).unwrap(); + }) + .unwrap(); }); packet_id } // Generate packet ID encoding snippets -fn generate_packet_id_snippets(packet_id: Option) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) { +fn generate_packet_id_snippets( + packet_id: Option, +) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) { let sync_snippet = if let Some(id) = packet_id { quote! 
{ ::encode(&#id.into(), writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; @@ -68,7 +71,9 @@ fn generate_async_field_encoders(fields: &syn::Fields) -> proc_macro2::TokenStre } // Generate enum variant encoding using static dispatch -fn generate_enum_encoders(data: &syn::DataEnum) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) { +fn generate_enum_encoders( + data: &syn::DataEnum, +) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) { let variants = data.variants.iter().map(|variant| { let variant_ident = &variant.ident; @@ -142,7 +147,7 @@ fn generate_enum_encoders(data: &syn::DataEnum) -> (proc_macro2::TokenStream, pr match self { #(#async_variants)* } - } + }, ) } @@ -150,252 +155,257 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); let packet_attr = get_derive_attributes(&input, "packet"); - let (packet_id_snippet, async_packet_id_snippet) = generate_packet_id_snippets(extract_packet_id(packet_attr)); + let (packet_id_snippet, async_packet_id_snippet) = + generate_packet_id_snippets(extract_packet_id(packet_attr)); let (sync_impl, async_impl) = match &input.data { syn::Data::Struct(data) => { let field_encoders = generate_field_encoders(&data.fields); let async_field_encoders = generate_async_field_encoders(&data.fields); - (quote! 
{ - fn encode(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { - match opts { - ferrumc_net_codec::encode::NetEncodeOpts::None => { - #packet_id_snippet - #field_encoders - } - ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - #packet_id_snippet - #field_encoders - - let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - ::encode(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - actual_writer.write_all(writer)?; - } - ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; - - #packet_id_snippet - #field_encoders - - if writer.len() >= compression_threshold as usize { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - - let mut compressed_data = Vec::new(); - { - let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); - e.write_all(writer)?; - compressed_data = e.finish()?; - } - - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); + ( + quote! 
{ + fn encode(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { + match opts { + ferrumc_net_codec::encode::NetEncodeOpts::None => { + #packet_id_snippet + #field_encoders + } + ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; - ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - actual_writer.write_all(&compressed_data)?; - } else { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + #packet_id_snippet + #field_encoders - ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + ::encode(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; actual_writer.write_all(writer)?; } - }, - e => unimplemented!("Unsupported option for NetEncode: {:?}", e), - } - Ok(()) - } - }, - quote! 
{ - async fn encode_async(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { - match opts { - ferrumc_net_codec::encode::NetEncodeOpts::None => { - #async_packet_id_snippet - #async_field_encoders - } - ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - #async_packet_id_snippet - #field_encoders - - let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - ::encode_async(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::write_all(actual_writer, writer).await?; - } - ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; - - #async_packet_id_snippet - #async_field_encoders - - if writer.len() >= compression_threshold as usize { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - - let mut compressed_data = Vec::new(); - { - let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); - e.write_all(writer)?; - compressed_data = e.finish()?; + ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; + + let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; + + #packet_id_snippet + #field_encoders + + if writer.len() >= compression_threshold as usize { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + + let mut compressed_data = Vec::new(); + { + let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); + e.write_all(writer)?; + compressed_data = e.finish()?; + } + + 
let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); + + ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + actual_writer.write_all(&compressed_data)?; + } else { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + + ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + actual_writer.write_all(writer)?; } + }, + e => unimplemented!("Unsupported option for NetEncode: {:?}", e), + } + Ok(()) + } + }, + quote! { + async fn encode_async(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { + match opts { + ferrumc_net_codec::encode::NetEncodeOpts::None => { + #async_packet_id_snippet + #async_field_encoders + } + ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); - - ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - // actual_writer.write_all(&compressed_data).await?; - ::write_all(actual_writer, &compressed_data).await?; - } else { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + #async_packet_id_snippet + #field_encoders - 
::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - // actual_writer.write_all(writer).await?; + let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + ::encode_async(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; ::write_all(actual_writer, writer).await?; } - }, - _ => unimplemented!("Unsupported options for NetEncode"), + ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; + + let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; + + #async_packet_id_snippet + #async_field_encoders + + if writer.len() >= compression_threshold as usize { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + + let mut compressed_data = Vec::new(); + { + let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); + e.write_all(writer)?; + compressed_data = e.finish()?; + } + + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); + + ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + // actual_writer.write_all(&compressed_data).await?; + ::write_all(actual_writer, &compressed_data).await?; + } else { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + + ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + ::encode_async(&data_length, actual_writer, 
&ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + // actual_writer.write_all(writer).await?; + ::write_all(actual_writer, writer).await?; + } + }, + _ => unimplemented!("Unsupported options for NetEncode"), + } + Ok(()) } - Ok(()) - } - }) - }, + }, + ) + } syn::Data::Enum(data) => { let (sync_enum_encoder, async_enum_encoder) = generate_enum_encoders(data); - (quote! { - fn encode(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { - match opts { - ferrumc_net_codec::encode::NetEncodeOpts::None => { - #packet_id_snippet - #sync_enum_encoder - } - ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - #packet_id_snippet - #sync_enum_encoder - - let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - ::encode(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - actual_writer.write_all(writer)?; - } - ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; - - #packet_id_snippet - #sync_enum_encoder - - if writer.len() >= compression_threshold as usize { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - - let mut compressed_data = Vec::new(); - { - let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); - > as std::io::Write>::write_all(&mut e, writer)?; - compressed_data = e.finish()?; - } - - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); + ( + quote! 
{ + fn encode(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { + match opts { + ferrumc_net_codec::encode::NetEncodeOpts::None => { + #packet_id_snippet + #sync_enum_encoder + } + ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; - ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - actual_writer.write_all(&compressed_data)?; - } else { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + #packet_id_snippet + #sync_enum_encoder - ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; - ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + ::encode(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; actual_writer.write_all(writer)?; } - }, - e => unimplemented!("Unsupported option for NetEncode: {:?}", e), - } - Ok(()) - } - }, - quote! 
{ - async fn encode_async(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { - match opts { - ferrumc_net_codec::encode::NetEncodeOpts::None => { - #async_packet_id_snippet - #async_enum_encoder - } - ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - #async_packet_id_snippet - #sync_enum_encoder - - let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - ::encode_async(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::write_all(actual_writer, writer).await?; - } - ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { - let actual_writer = writer; - let mut writer = Vec::new(); - let mut writer = &mut writer; - - let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; - - #async_packet_id_snippet - #async_enum_encoder - - if writer.len() >= compression_threshold as usize { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); - - let mut compressed_data = Vec::new(); - { - let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); - > as std::io::Write>::write_all(&mut e, writer)?; - compressed_data = e.finish()?; + ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; + + let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; + + #packet_id_snippet + #sync_enum_encoder + + if writer.len() >= compression_threshold as usize { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + + let mut compressed_data = Vec::new(); + { + let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); + > as 
std::io::Write>::write_all(&mut e, writer)?; + compressed_data = e.finish()?; + } + + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); + + ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + actual_writer.write_all(&compressed_data)?; + } else { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + + ::encode(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + ::encode(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None)?; + actual_writer.write_all(writer)?; } + }, + e => unimplemented!("Unsupported option for NetEncode: {:?}", e), + } + Ok(()) + } + }, + quote! { + async fn encode_async(&self, writer: &mut W, opts: &ferrumc_net_codec::encode::NetEncodeOpts) -> ferrumc_net_codec::encode::NetEncodeResult<()> { + match opts { + ferrumc_net_codec::encode::NetEncodeOpts::None => { + #async_packet_id_snippet + #async_enum_encoder + } + ferrumc_net_codec::encode::NetEncodeOpts::WithLength => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); - - ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::write_all(actual_writer, &compressed_data).await?; - } else { - let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); - let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + #async_packet_id_snippet + 
#sync_enum_encoder - ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; - ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + let len: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + ::encode_async(&len, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; ::write_all(actual_writer, writer).await?; } - }, - _ => unimplemented!("Unsupported options for NetEncode"), + ferrumc_net_codec::encode::NetEncodeOpts::Compressed => { + let actual_writer = writer; + let mut writer = Vec::new(); + let mut writer = &mut writer; + + let compression_threshold = ferrumc_config::statics::get_global_config().network_compression_threshold; + + #async_packet_id_snippet + #async_enum_encoder + + if writer.len() >= compression_threshold as usize { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = writer.len().into(); + + let mut compressed_data = Vec::new(); + { + let mut e = flate2::write::ZlibEncoder::new(Vec::new(), flate2::Compression::default()); + > as std::io::Write>::write_all(&mut e, writer)?; + compressed_data = e.finish()?; + } + + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + compressed_data.len()).into(); + + ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + ::write_all(actual_writer, &compressed_data).await?; + } else { + let data_length: ferrumc_net_codec::net_types::var_int::VarInt = 0.into(); + let packet_length: ferrumc_net_codec::net_types::var_int::VarInt = (data_length.len + writer.len()).into(); + + ::encode_async(&packet_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + ::encode_async(&data_length, actual_writer, &ferrumc_net_codec::encode::NetEncodeOpts::None).await?; + 
::write_all(actual_writer, writer).await?; + } + }, + _ => unimplemented!("Unsupported options for NetEncode"), + } + Ok(()) } - Ok(()) - } - }) - }, + }, + ) + } _ => unimplemented!("NetEncode can only be derived for structs and enums"), }; @@ -414,4 +424,4 @@ pub(crate) fn derive(input: TokenStream) -> TokenStream { #async_impl } }) -} \ No newline at end of file +} diff --git a/src/lib/derive_macros/src/net/mod.rs b/src/lib/derive_macros/src/net/mod.rs index 1a89eb30..fd9556b6 100644 --- a/src/lib/derive_macros/src/net/mod.rs +++ b/src/lib/derive_macros/src/net/mod.rs @@ -1,3 +1,3 @@ +pub(super) mod decode; pub(super) mod encode; -pub(super) mod decode; -pub(super) mod packets; \ No newline at end of file +pub(super) mod packets; diff --git a/src/lib/derive_macros/src/net/packets/mod.rs b/src/lib/derive_macros/src/net/packets/mod.rs index f171511d..2764c0fc 100644 --- a/src/lib/derive_macros/src/net/packets/mod.rs +++ b/src/lib/derive_macros/src/net/packets/mod.rs @@ -1,9 +1,9 @@ +use colored::Colorize; use proc_macro::TokenStream; use quote::quote; use std::env; use std::ops::Add; use syn::{parse_macro_input, LitInt, LitStr}; -use colored::Colorize; /// Essentially, this just reads all the files in the directory and generates a match arm for each packet. /// (packet_id, state) => { ... 
} @@ -13,7 +13,7 @@ pub fn bake_registry(input: TokenStream) -> TokenStream { let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set"); let module_path = parse_macro_input!(input as syn::LitStr).value(); - + let mut path = manifest_dir.add(module_path.as_str()); path = path.replace("\\", "/"); @@ -25,7 +25,9 @@ pub fn bake_registry(input: TokenStream) -> TokenStream { println!( " {} {}", "[FERRUMC_MACROS]".blue().bold(), - format!("Parsing packets in {}", dir_path.display()).white().bold() + format!("Parsing packets in {}", dir_path.display()) + .white() + .bold() ); if !std::fs::metadata(dir_path).unwrap().is_dir() { @@ -90,7 +92,8 @@ pub fn bake_registry(input: TokenStream) -> TokenStream { } Ok(()) - }).unwrap(); + }) + .unwrap(); let packet_id = packet_id.expect("packet_id not found"); @@ -114,7 +117,8 @@ pub fn bake_registry(input: TokenStream) -> TokenStream { ); let struct_path = format!("{}::{}", path, struct_name); - let struct_path = syn::parse_str::(&struct_path).expect("parse_str failed"); + let struct_path = + syn::parse_str::(&struct_path).expect("parse_str failed"); match_arms.push(quote! { (#packet_id, #state) => { @@ -133,31 +137,37 @@ pub fn bake_registry(input: TokenStream) -> TokenStream { println!( " {} {}", "[FERRUMC_MACROS]".bold().blue(), - format!("Found {} packets", match_arms.len()).purple().bold() + format!("Found {} packets", match_arms.len()) + .purple() + .bold() ); println!( " {} {}", "[FERRUMC_MACROS]".bold().blue(), - format!("It took: {:?} to parse all the files and generate the packet registry", elapsed).red().bold() + format!( + "It took: {:?} to parse all the files and generate the packet registry", + elapsed + ) + .red() + .bold() ); let match_arms = match_arms.into_iter(); - + let output = quote! 
{ pub async fn handle_packet(packet_id: u8, conn_id: usize, conn_state: &crate::connection::ConnectionState, cursor: &mut R, state: std::sync::Arc) -> crate::NetResult<()> { match (packet_id, conn_state.as_str()) { #(#match_arms)* _ => tracing::debug!("No packet found for ID: 0x{:02X} in state: {}", packet_id, conn_state.as_str()), } - + Ok(()) } }; - + TokenStream::from(output) } - /// `#[packet]` attribute is used to declare an incoming/outgoing packet. /// /// packet_id => The packet id of the packet. In hexadecimal. @@ -174,7 +184,7 @@ pub fn bake_registry(input: TokenStream) -> TokenStream { /// pub timestamp: i64, /// } /// ``` -/// +/// /// ```ignore /// use ferrumc_macros::{packet, NetEncode}; /// @@ -206,4 +216,4 @@ pub fn attribute(args: TokenStream, input: TokenStream) -> TokenStream { } input -} \ No newline at end of file +} diff --git a/src/lib/ecs/src/components/mod.rs b/src/lib/ecs/src/components/mod.rs index 79c83a7b..6853fd8e 100644 --- a/src/lib/ecs/src/components/mod.rs +++ b/src/lib/ecs/src/components/mod.rs @@ -50,10 +50,10 @@ impl ComponentStorage { return Vec::new(); } }; - + components.value().entities() } - + pub fn remove(&self, entity: Entity) { let type_id = TypeId::of::(); self.components.get_mut(&type_id) @@ -72,7 +72,7 @@ impl ComponentStorage { components.remove(entity); } }); - + Ok(()) } } @@ -143,7 +143,7 @@ mod debug { Debug::fmt(&**self, f) } } - + impl Debug for ComponentRefMut<'_, T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Debug::fmt(&**self, f) @@ -231,12 +231,13 @@ mod tests { assert!(position.is_err()); } } -*/use crate::components::storage::{Component, ComponentRef, ComponentRefMut, ComponentSparseSet}; -use dashmap::{DashMap}; -use parking_lot::RwLock; -use std::any::{TypeId}; -use crate::ECSResult; +*/ +use crate::components::storage::{Component, ComponentRef, ComponentRefMut, ComponentSparseSet}; use crate::errors::ECSError; +use crate::ECSResult; +use dashmap::DashMap; +use 
parking_lot::RwLock; +use std::any::TypeId; pub mod storage; @@ -255,7 +256,7 @@ impl ComponentStorage for ComponentSparseSet { fn as_ptr(&self) -> *const () { self as *const Self as *const () } - + fn remove_component(&self, entity_id: usize) -> ECSResult<()> { self.remove(entity_id) } @@ -295,33 +296,41 @@ impl ComponentManager { } }; - Ok(()) } pub fn get<'a, T: Component>(&self, entity_id: usize) -> ECSResult> { let type_id = TypeId::of::(); - let ptr = *self.components.get(&type_id).ok_or(ECSError::ComponentTypeNotFound)?; + let ptr = *self + .components + .get(&type_id) + .ok_or(ECSError::ComponentTypeNotFound)?; let component_set = unsafe { &*(ptr as *const ComponentSparseSet) }; component_set.get(entity_id) } pub fn get_mut<'a, T: Component>(&self, entity_id: usize) -> ECSResult> { let type_id = TypeId::of::(); - let ptr = *self.components.get(&type_id).ok_or(ECSError::ComponentTypeNotFound)?; + let ptr = *self + .components + .get(&type_id) + .ok_or(ECSError::ComponentTypeNotFound)?; let component_set = unsafe { &*(ptr as *const ComponentSparseSet) }; component_set.get_mut(entity_id) } pub fn remove(&self, entity_id: usize) -> ECSResult<()> { let type_id = TypeId::of::(); - let ptr = *self.components.get(&type_id).ok_or(ECSError::ComponentTypeNotFound)?; + let ptr = *self + .components + .get(&type_id) + .ok_or(ECSError::ComponentTypeNotFound)?; let component_set = unsafe { &mut *(ptr as *mut ComponentSparseSet) }; component_set.remove(entity_id)?; Ok(()) } - pub fn remove_all_components(&self, entity_id: usize) -> ECSResult<()>{ + pub fn remove_all_components(&self, entity_id: usize) -> ECSResult<()> { for storage in self.storage.read().iter() { storage.remove_component(entity_id)?; } diff --git a/src/lib/ecs/src/entities.rs b/src/lib/ecs/src/entities.rs index b8881896..1ad1d4a9 100644 --- a/src/lib/ecs/src/entities.rs +++ b/src/lib/ecs/src/entities.rs @@ -1,10 +1,10 @@ #![allow(dead_code)] -use crate::components::{ComponentManager}; -use 
std::sync::atomic::{AtomicUsize, Ordering}; -use tracing::trace; use crate::components::storage::Component; +use crate::components::ComponentManager; use crate::ECSResult; +use std::sync::atomic::{AtomicUsize, Ordering}; +use tracing::trace; /// Entity is a unique identifier for an entity in the ECS. /// It is a simple usize. @@ -35,10 +35,9 @@ impl EntityManager { trace!("Created entity with id: {}", id); id as Entity } - + pub fn builder<'a>(&'a self, component_storage: &'a ComponentManager) -> EntityBuilder<'a> { - EntityBuilder:: - new(self.create_entity(), component_storage) + EntityBuilder::new(self.create_entity(), component_storage) } } @@ -49,7 +48,10 @@ pub struct EntityBuilder<'a> { impl<'a> EntityBuilder<'a> { pub fn new(entity: Entity, component_storage: &'a ComponentManager) -> Self { - EntityBuilder { entity, component_storage } + EntityBuilder { + entity, + component_storage, + } } pub fn with(self, component: T) -> ECSResult { @@ -62,7 +64,6 @@ impl<'a> EntityBuilder<'a> { } } - #[cfg(test)] mod tests { use super::*; diff --git a/src/lib/ecs/src/lib.rs b/src/lib/ecs/src/lib.rs index 403839e4..53f87759 100644 --- a/src/lib/ecs/src/lib.rs +++ b/src/lib/ecs/src/lib.rs @@ -1,5 +1,5 @@ -use crate::components::ComponentManager; use crate::components::storage::{Component, ComponentRef, ComponentRefMut}; +use crate::components::ComponentManager; use crate::entities::{Entity, EntityBuilder, EntityManager}; use crate::query::{Query, QueryItem}; @@ -13,7 +13,6 @@ pub mod query; mod tests; pub type ECSResult = Result; - /// The main struct that holds all the ECS data. /// It's called the universe because I didn't want to name it 'World'. /// Since it may be confused with the Minecraft world. 
@@ -48,11 +47,11 @@ impl Universe { self.components.insert(entity, component)?; Ok(self) } - + pub fn remove_component(&self, entity: Entity) -> ECSResult<()> { self.components.remove::(entity) } - + pub fn remove_all_components(&self, entity: Entity) -> ECSResult<()> { self.components.remove_all_components(entity) } @@ -67,8 +66,8 @@ impl Universe { pub fn query(&self) -> Query { Query::new(&self.components) } - + pub fn get_component_manager(&self) -> &ComponentManager { &self.components } -} \ No newline at end of file +} diff --git a/src/lib/ecs/src/query.rs b/src/lib/ecs/src/query.rs index 364c4a74..3413eb01 100644 --- a/src/lib/ecs/src/query.rs +++ b/src/lib/ecs/src/query.rs @@ -7,47 +7,32 @@ use crate::ECSResult; pub trait QueryItem { type Item<'a>; - fn fetch<'a>( - entity: Entity, - storage: &ComponentManager, - ) -> ECSResult>; + fn fetch<'a>(entity: Entity, storage: &ComponentManager) -> ECSResult>; /*fn entities( storage: &ComponentManager, ) -> Vec;*/ - fn entities( - storage: &ComponentManager - ) -> Vec; + fn entities(storage: &ComponentManager) -> Vec; } impl QueryItem for &T { type Item<'a> = ComponentRef<'a, T>; - fn fetch<'a>( - entity: Entity, - storage: &ComponentManager, - ) -> ECSResult> { + fn fetch<'a>(entity: Entity, storage: &ComponentManager) -> ECSResult> { storage.get(entity) } - fn entities( - storage: &ComponentManager, - ) -> Vec { + fn entities(storage: &ComponentManager) -> Vec { storage.get_entities_with::() } } impl QueryItem for &mut T { type Item<'a> = ComponentRefMut<'a, T>; - fn fetch<'a>( - entity: Entity, - storage: &ComponentManager, - ) -> ECSResult> { + fn fetch<'a>(entity: Entity, storage: &ComponentManager) -> ECSResult> { storage.get_mut(entity) } - fn entities( - storage: &ComponentManager, - ) -> Vec { + fn entities(storage: &ComponentManager) -> Vec { storage.get_entities_with::() } } @@ -69,7 +54,6 @@ impl Clone for Query<'_, Q> { } } - impl<'a, Q: QueryItem> Query<'a, Q> { pub fn new(component_storage: &'a 
ComponentManager) -> Self { Self { @@ -88,13 +72,11 @@ impl<'a, Q: QueryItem> Query<'a, Q> { } } - mod iter_impl { use super::*; use rayon::prelude::*; - impl<'a, Q: QueryItem> Iterator for Query<'a, Q> - { + impl<'a, Q: QueryItem> Iterator for Query<'a, Q> { type Item = (Entity, Q::Item<'a>); fn next(&mut self) -> Option { @@ -130,7 +112,6 @@ mod iter_impl { } } - mod multi_impl { use super::*; macro_rules! impl_query_item_tuple { @@ -166,7 +147,6 @@ mod multi_impl { impl_query_item_tuple!(A, B, C, D, E); impl_query_item_tuple!(A, B, C, D, E, F); - /// Find the common elements in a vector of vectors /// Uses smallest vector as comparator, making it somewhat efficient. fn find_common_elements(mut vecs: Vec>) -> Vec { @@ -191,4 +171,4 @@ mod multi_impl { common } -} \ No newline at end of file +} diff --git a/src/lib/ecs/src/tests/mod.rs b/src/lib/ecs/src/tests/mod.rs index 4e9314c0..8673e5a0 100644 --- a/src/lib/ecs/src/tests/mod.rs +++ b/src/lib/ecs/src/tests/mod.rs @@ -1,10 +1,9 @@ -use std::thread; -use std::time::{Duration}; -use crate::components::{ComponentManager}; +use crate::components::ComponentManager; use crate::entities::EntityManager; use crate::query::Query; use rayon::prelude::*; - +use std::thread; +use std::time::Duration; #[derive(Debug)] #[expect(dead_code)] @@ -31,22 +30,26 @@ fn test_basic() { for x in 0..10 { entity_manager .builder(&component_storage) - .with(Position { x, y: x * 2 }).unwrap() - .with(Player { username: format!("Player{}", x) }).unwrap() + .with(Position { x, y: x * 2 }) + .unwrap() + .with(Player { + username: format!("Player{}", x), + }) + .unwrap() .build(); } let query = Query::<(&Player, &mut Position)>::new(&component_storage); - + ParallelIterator::for_each(query.into_par_iter(), |(_eid, (_player, position))| { let sleep_duration = Duration::from_millis(100 * (position.x as u64)); thread::sleep(sleep_duration); }); - - /* let duration = start.elapsed(); - - // Should be true, since we're running all branches in 
parallel, therefore, + + /* let duration = start.elapsed(); + + // Should be true, since we're running all branches in parallel, therefore, // at-most it should take the time of the longest branch, // which is 100 * 9, which is 900ms. So with some buffer, it should be less than 1000ms. - + assert!(duration.as_millis() < 1000);*/ -} \ No newline at end of file +} diff --git a/src/lib/events/src/infrastructure.rs b/src/lib/events/src/infrastructure.rs index 59a68480..25f612ce 100644 --- a/src/lib/events/src/infrastructure.rs +++ b/src/lib/events/src/infrastructure.rs @@ -10,7 +10,7 @@ type AsyncEventListener = fn( ::Data, ::State, ) -> Pin< - Box::Data, ::Error>> + Send>, + Box::Data, ::Error>> + Send>, >; /// This is the global map of event listeners. @@ -63,7 +63,6 @@ pub trait Event: Sized + Send + Sync + 'static { /// Event specific error type Error: std::fmt::Debug + Send; - /// Stringified name of the event fn name() -> &'static str; @@ -78,15 +77,12 @@ pub trait Event: Sized + Send + Sync + 'static { .get(Self::name()) .expect("Failed to find event listeners. Impossible;"); - // Convert listeners iterator into Stream stream::iter(listeners.iter()) // TODO: Remove this since it's not possible to have a wrong type in the map of the event??? // Maybe some speedup? // Filter only listeners we can downcast into the correct type - .filter_map( - |dyn_list| async { dyn_list.downcast_ref::>() }, - ) + .filter_map(|dyn_list| async { dyn_list.downcast_ref::>() }) // Trigger listeners in a row .fold(Ok(event), |intercepted, listener| { let state = state.clone(); @@ -104,61 +100,61 @@ pub trait Event: Sized + Send + Sync + 'static { } /*/// Trigger the execution of an event with concurrency support - /// - /// If the event structure supports cloning. This method can be used to execute - /// listeners of the same priority concurrently (using tokio::task). This imply a - /// cloning cost at each listener execution. 
See `Event::trigger` for a more - /// efficient but more linear approach. - /// - /// # Mutability policy - /// - /// The listeners having the same priority being runned concurrently, there are no - /// guarantees in the order of mutation of the event data. - /// - /// It is recommended to ensure listeners of the same priority exclusively update fields - /// in the event data that are untouched by other listeners of the same group. - async fn trigger_concurrently(event: Self::Data) -> Result<(), Self::Error> - where - Self::Data: Clone, - { - let read_guard = &EVENTS_LISTENERS; - let listeners = read_guard.get(Self::name()).unwrap(); - - // Convert listeners iterator into Stream - let mut stream = stream::iter(listeners.iter()); - - let mut priority_join_set = Vec::new(); - let mut current_priority = 0; - - while let Some(Some(listener)) = stream - .next() - .await - .map(|l| l.downcast_ref::>()) + /// + /// If the event structure supports cloning. This method can be used to execute + /// listeners of the same priority concurrently (using tokio::task). This imply a + /// cloning cost at each listener execution. See `Event::trigger` for a more + /// efficient but more linear approach. + /// + /// # Mutability policy + /// + /// The listeners having the same priority being runned concurrently, there are no + /// guarantees in the order of mutation of the event data. + /// + /// It is recommended to ensure listeners of the same priority exclusively update fields + /// in the event data that are untouched by other listeners of the same group. 
+ async fn trigger_concurrently(event: Self::Data) -> Result<(), Self::Error> + where + Self::Data: Clone, { - if listener.priority == current_priority { - priority_join_set.push(tokio::spawn((listener.listener)(event.clone()))); - } else { - // Await over all listeners launched - let joined = future::join_all(priority_join_set.iter_mut()).await; - - // If one listener fail we return the first error - if let Some(err) = joined - .into_iter() - .filter_map(|res| res.expect("No task should ever panic. Impossible;").err()) - .next() - { - return Err(err); - } + let read_guard = &EVENTS_LISTENERS; + let listeners = read_guard.get(Self::name()).unwrap(); + + // Convert listeners iterator into Stream + let mut stream = stream::iter(listeners.iter()); + + let mut priority_join_set = Vec::new(); + let mut current_priority = 0; + + while let Some(Some(listener)) = stream + .next() + .await + .map(|l| l.downcast_ref::>()) + { + if listener.priority == current_priority { + priority_join_set.push(tokio::spawn((listener.listener)(event.clone()))); + } else { + // Await over all listeners launched + let joined = future::join_all(priority_join_set.iter_mut()).await; + + // If one listener fail we return the first error + if let Some(err) = joined + .into_iter() + .filter_map(|res| res.expect("No task should ever panic. 
Impossible;").err()) + .next() + { + return Err(err); + } - // Update priority to the new listener(s) - current_priority = listener.priority; - priority_join_set.push(tokio::spawn((listener.listener)(event.clone()))); + // Update priority to the new listener(s) + current_priority = listener.priority; + priority_join_set.push(tokio::spawn((listener.listener)(event.clone()))); + } } - } - Ok(()) - } -*/ + Ok(()) + } + */ /// Register a new event listener for this event fn register(listener: AsyncEventListener, priority: u8) { // Create the event listener structure diff --git a/src/lib/events/src/tests/mod.rs b/src/lib/events/src/tests/mod.rs index a2e199fb..e1ace811 100644 --- a/src/lib/events/src/tests/mod.rs +++ b/src/lib/events/src/tests/mod.rs @@ -68,7 +68,10 @@ async fn some_event_listener(mut event: SomeEvent) -> Result Result { diff --git a/src/lib/net/crates/codec/src/decode/errors.rs b/src/lib/net/crates/codec/src/decode/errors.rs index f7371249..472aee9d 100644 --- a/src/lib/net/crates/codec/src/decode/errors.rs +++ b/src/lib/net/crates/codec/src/decode/errors.rs @@ -8,7 +8,7 @@ pub enum NetDecodeError { #[error("Invalid UTF-8: {0}")] Utf8Error(#[from] std::string::FromUtf8Error), - #[error("Compressed packet smaller than threshold. 'data_length' = {0}, but threshold is {}", get_global_config().network_compression_threshold)] + #[error("Compressed packet smaller than threshold. 
'data_length' = {0}, but threshold is {threshold}", threshold = get_global_config().network_compression_threshold)] CompressedPacketTooSmall(usize), #[error("External error: {0}")] diff --git a/src/lib/net/crates/codec/src/decode/primitives.rs b/src/lib/net/crates/codec/src/decode/primitives.rs index 3a8ba064..3e00e794 100644 --- a/src/lib/net/crates/codec/src/decode/primitives.rs +++ b/src/lib/net/crates/codec/src/decode/primitives.rs @@ -59,8 +59,7 @@ where T: NetDecode, { fn decode(reader: &mut R, opts: &NetDecodeOpts) -> NetDecodeResult { - if matches!(opts, NetDecodeOpts::IsSizePrefixed) - { + if matches!(opts, NetDecodeOpts::IsSizePrefixed) { let len = ::decode(reader, opts)?.val as usize; let mut vec = Vec::with_capacity(len); for _ in 0..len { @@ -84,7 +83,6 @@ where } } - /// This isn't actually a type in the Minecraft Protocol. This is just for saving data/ or for general use. /// It was created for saving/reading chunks! impl NetDecode for HashMap @@ -102,4 +100,4 @@ where } Ok(map) } -} \ No newline at end of file +} diff --git a/src/lib/net/crates/codec/src/encode/errors.rs b/src/lib/net/crates/codec/src/encode/errors.rs index 5da37cde..402fe9be 100644 --- a/src/lib/net/crates/codec/src/encode/errors.rs +++ b/src/lib/net/crates/codec/src/encode/errors.rs @@ -1,8 +1,7 @@ - #[derive(Debug, thiserror::Error)] pub enum NetEncodeError { #[error("IO error: {0}")] Io(#[from] std::io::Error), #[error("External error: {0}")] ExternalError(#[from] Box), -} \ No newline at end of file +} diff --git a/src/lib/net/crates/codec/src/encode/primitives.rs b/src/lib/net/crates/codec/src/encode/primitives.rs index 55f1f016..b90ffc79 100644 --- a/src/lib/net/crates/codec/src/encode/primitives.rs +++ b/src/lib/net/crates/codec/src/encode/primitives.rs @@ -44,13 +44,16 @@ impl_for_primitives!( f64 ); - impl NetEncode for bool { fn encode(&self, writer: &mut W, _: &NetEncodeOpts) -> NetEncodeResult<()> { (*self as u8).encode(writer, &NetEncodeOpts::None) } - async fn 
encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { + async fn encode_async( + &self, + writer: &mut W, + opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { (*self as u8).encode_async(writer, opts).await } } @@ -60,8 +63,14 @@ impl NetEncode for String { self.as_str().encode(writer, &NetEncodeOpts::None) } - async fn encode_async(&self, writer: &mut W, _: &NetEncodeOpts) -> NetEncodeResult<()> { - self.as_str().encode_async(writer, &NetEncodeOpts::None).await + async fn encode_async( + &self, + writer: &mut W, + _: &NetEncodeOpts, + ) -> NetEncodeResult<()> { + self.as_str() + .encode_async(writer, &NetEncodeOpts::None) + .await } } @@ -73,7 +82,11 @@ impl NetEncode for &str { Ok(()) } - async fn encode_async(&self, writer: &mut W, _: &NetEncodeOpts) -> NetEncodeResult<()> { + async fn encode_async( + &self, + writer: &mut W, + _: &NetEncodeOpts, + ) -> NetEncodeResult<()> { let len: VarInt = VarInt::new(self.len() as i32); len.encode_async(writer, &NetEncodeOpts::None).await?; writer.write_all(self.as_bytes()).await?; @@ -86,8 +99,7 @@ where T: NetEncode, { fn encode(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { - if matches!(opts, NetEncodeOpts::SizePrefixed) - { + if matches!(opts, NetEncodeOpts::SizePrefixed) { let len: VarInt = VarInt::new(self.len() as i32); len.encode(writer, opts)?; } @@ -98,9 +110,12 @@ where Ok(()) } - async fn encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { - if matches!(opts, NetEncodeOpts::SizePrefixed) - { + async fn encode_async( + &self, + writer: &mut W, + opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { + if matches!(opts, NetEncodeOpts::SizePrefixed) { let len: VarInt = VarInt::new(self.len() as i32); len.encode_async(writer, opts).await?; } @@ -114,8 +129,7 @@ where impl NetEncode for &[u8] { fn encode(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { - if matches!(opts, NetEncodeOpts::SizePrefixed) - { + if 
matches!(opts, NetEncodeOpts::SizePrefixed) { let len: VarInt = VarInt::new(self.len() as i32); len.encode(writer, opts)?; } @@ -124,9 +138,12 @@ impl NetEncode for &[u8] { Ok(()) } - async fn encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { - if matches!(opts, NetEncodeOpts::SizePrefixed) - { + async fn encode_async( + &self, + writer: &mut W, + opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { + if matches!(opts, NetEncodeOpts::SizePrefixed) { let len: VarInt = VarInt::new(self.len() as i32); len.encode_async(writer, opts).await?; } @@ -138,8 +155,7 @@ impl NetEncode for &[u8] { impl NetEncode for &[&str] { fn encode(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { - if matches!(opts, NetEncodeOpts::SizePrefixed) - { + if matches!(opts, NetEncodeOpts::SizePrefixed) { let len: VarInt = VarInt::new(self.len() as i32); len.encode(writer, opts)?; } @@ -150,9 +166,12 @@ impl NetEncode for &[&str] { Ok(()) } - async fn encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { - if matches!(opts, NetEncodeOpts::SizePrefixed) - { + async fn encode_async( + &self, + writer: &mut W, + opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { + if matches!(opts, NetEncodeOpts::SizePrefixed) { let len: VarInt = VarInt::new(self.len() as i32); len.encode_async(writer, opts).await?; } @@ -172,7 +191,11 @@ impl NetEncode for Option { } } - async fn encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { + async fn encode_async( + &self, + writer: &mut W, + opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { match self { Some(value) => value.encode_async(writer, opts).await, None => Ok(()), @@ -180,7 +203,6 @@ impl NetEncode for Option { } } - impl NetEncode for HashMap where K: NetEncode, @@ -197,7 +219,11 @@ where Ok(()) } - async fn encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { + async fn encode_async( + &self, + writer: &mut W, + opts: 
&NetEncodeOpts, + ) -> NetEncodeResult<()> { let len: VarInt = VarInt::new(self.len() as i32); len.encode_async(writer, opts).await?; @@ -207,4 +233,4 @@ where } Ok(()) } -} \ No newline at end of file +} diff --git a/src/lib/net/crates/codec/src/lib.rs b/src/lib/net/crates/codec/src/lib.rs index cbbaf742..9cb2af18 100644 --- a/src/lib/net/crates/codec/src/lib.rs +++ b/src/lib/net/crates/codec/src/lib.rs @@ -1,3 +1,3 @@ pub mod decode; pub mod encode; -pub mod net_types; \ No newline at end of file +pub mod net_types; diff --git a/src/lib/net/crates/codec/src/net_types/length_prefixed_vec.rs b/src/lib/net/crates/codec/src/net_types/length_prefixed_vec.rs index ff6ed79b..2f0edabd 100644 --- a/src/lib/net/crates/codec/src/net_types/length_prefixed_vec.rs +++ b/src/lib/net/crates/codec/src/net_types/length_prefixed_vec.rs @@ -1,18 +1,16 @@ -use std::io::{Read, Write}; -use tokio::io::AsyncWrite; use crate::decode::{NetDecode, NetDecodeOpts, NetDecodeResult}; use crate::encode::{NetEncode, NetEncodeOpts, NetEncodeResult}; use crate::net_types::var_int::VarInt; +use std::io::{Read, Write}; +use tokio::io::AsyncWrite; #[derive(Debug)] -pub struct LengthPrefixedVec -{ +pub struct LengthPrefixedVec { pub length: VarInt, pub data: Vec, } -impl LengthPrefixedVec -{ +impl LengthPrefixedVec { pub fn new(data: Vec) -> Self { Self { length: VarInt::new(data.len() as i32), @@ -35,7 +33,11 @@ where Ok(()) } - async fn encode_async(&self, writer: &mut W, opts: &NetEncodeOpts) -> NetEncodeResult<()> { + async fn encode_async( + &self, + writer: &mut W, + opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { self.length.encode_async(writer, opts).await?; for item in &self.data { @@ -57,9 +59,6 @@ where data.push(T::decode(reader, opts)?); } - Ok(Self { - length, - data, - }) + Ok(Self { length, data }) } -} \ No newline at end of file +} diff --git a/src/lib/net/crates/codec/src/net_types/mod.rs b/src/lib/net/crates/codec/src/net_types/mod.rs index 997e788b..ac384077 100644 --- 
a/src/lib/net/crates/codec/src/net_types/mod.rs +++ b/src/lib/net/crates/codec/src/net_types/mod.rs @@ -1,7 +1,7 @@ -pub mod var_int; +pub mod bitset; pub mod length_prefixed_vec; pub mod network_position; -pub mod bitset; +pub mod var_int; #[derive(Debug, thiserror::Error)] pub enum NetTypesError { @@ -11,4 +11,4 @@ pub enum NetTypesError { InvalidVarInt, #[error("I couldn't convert the value into a valid i32")] InvalidInputI32, -} \ No newline at end of file +} diff --git a/src/lib/net/crates/codec/src/net_types/network_position.rs b/src/lib/net/crates/codec/src/net_types/network_position.rs index 8cafb828..dd138782 100644 --- a/src/lib/net/crates/codec/src/net_types/network_position.rs +++ b/src/lib/net/crates/codec/src/net_types/network_position.rs @@ -34,9 +34,15 @@ impl NetEncode for NetworkPosition { Ok(()) } - async fn encode_async(&self, writer: &mut W, _: &NetEncodeOpts) -> NetEncodeResult<()> { + async fn encode_async( + &self, + writer: &mut W, + _: &NetEncodeOpts, + ) -> NetEncodeResult<()> { use tokio::io::AsyncWriteExt; - writer.write_all(self.as_u64().to_be_bytes().as_ref()).await?; + writer + .write_all(self.as_u64().to_be_bytes().as_ref()) + .await?; Ok(()) } } @@ -46,4 +52,4 @@ impl NetworkPosition { | ((self.z as u64 & 0x3FFFFFF) << 12) | (self.y as u64 & 0xFFF) } -} \ No newline at end of file +} diff --git a/src/lib/net/crates/codec/src/net_types/var_int.rs b/src/lib/net/crates/codec/src/net_types/var_int.rs index 4eef28a0..78087dd0 100644 --- a/src/lib/net/crates/codec/src/net_types/var_int.rs +++ b/src/lib/net/crates/codec/src/net_types/var_int.rs @@ -2,10 +2,10 @@ use crate::decode::errors::NetDecodeError; use crate::decode::{NetDecode, NetDecodeOpts, NetDecodeResult}; use crate::encode::errors::NetEncodeError; use crate::encode::{NetEncode, NetEncodeOpts, NetEncodeResult}; -use std::io::{Read, Write}; +use crate::net_types::NetTypesError; use bitcode::{Decode, Encode}; +use std::io::{Read, Write}; use tokio::io::{AsyncRead, AsyncReadExt, 
AsyncWrite, AsyncWriteExt}; -use crate::net_types::NetTypesError; #[derive(Debug, Encode, Decode)] pub struct VarInt { @@ -15,7 +15,7 @@ pub struct VarInt { pub len: usize, } -mod adapters{ +mod adapters { use crate::net_types::var_int::VarInt; impl From for VarInt { @@ -23,33 +23,31 @@ mod adapters{ Self::new(value as i32) } } - - + impl From for u8 { fn from(value: VarInt) -> Self { value.val as u8 } } - + impl From for VarInt { fn from(value: u8) -> Self { Self::new(value as i32) } } - + impl From for VarInt { fn from(value: i32) -> Self { Self::new(value) } } - - + impl Default for VarInt { fn default() -> Self { Self::new(0) } } - + impl PartialEq for VarInt { fn eq(&self, other: &usize) -> bool { self.val == *other as i32 @@ -137,7 +135,10 @@ impl VarInt { } } - pub async fn write_async(&self, cursor: &mut W) -> Result<(), NetTypesError> { + pub async fn write_async( + &self, + cursor: &mut W, + ) -> Result<(), NetTypesError> { let mut val = self.val; loop { if (val & !SEGMENT_BITS) == 0 { @@ -145,7 +146,9 @@ impl VarInt { return Ok(()); } - cursor.write_all(&[((val & SEGMENT_BITS) | CONTINUE_BIT) as u8]).await?; + cursor + .write_all(&[((val & SEGMENT_BITS) | CONTINUE_BIT) as u8]) + .await?; val = ((val as u32) >> 7) as i32; // Rust equivalent of Java's >>> operator } } @@ -153,8 +156,7 @@ impl VarInt { impl NetDecode for VarInt { fn decode(reader: &mut R, _opts: &NetDecodeOpts) -> NetDecodeResult { - VarInt::read(reader) - .map_err(|e| NetDecodeError::ExternalError(e.into())) + VarInt::read(reader).map_err(|e| NetDecodeError::ExternalError(e.into())) } } @@ -164,8 +166,13 @@ impl NetEncode for VarInt { .map_err(|e| NetEncodeError::ExternalError(e.into())) } - async fn encode_async(&self, writer: &mut W, _opts: &NetEncodeOpts) -> NetEncodeResult<()> { - self.write_async(writer).await + async fn encode_async( + &self, + writer: &mut W, + _opts: &NetEncodeOpts, + ) -> NetEncodeResult<()> { + self.write_async(writer) + .await .map_err(|e| 
NetEncodeError::ExternalError(e.into())) } -} \ No newline at end of file +} diff --git a/src/lib/net/src/errors.rs b/src/lib/net/src/errors.rs index ce150635..d24d3684 100644 --- a/src/lib/net/src/errors.rs +++ b/src/lib/net/src/errors.rs @@ -16,14 +16,13 @@ pub enum NetError { #[error("IO Error: {0}")] IOError(#[from] std::io::Error), - + #[error("Addr parse error: {0}")] AddrParseError(#[from] std::net::AddrParseError), #[error("Task Error: {0}")] TaskError(#[from] tokio::task::JoinError), - #[error("UTF8 Error: {0}")] UTF8Error(#[from] std::string::FromUtf8Error), @@ -41,7 +40,7 @@ pub enum NetError { #[error("{0}")] Packet(#[from] PacketError), - + #[error("{0}")] Chunk(#[from] ChunkError), } diff --git a/src/lib/net/src/lib.rs b/src/lib/net/src/lib.rs index 8187ddcb..dcea638d 100644 --- a/src/lib/net/src/lib.rs +++ b/src/lib/net/src/lib.rs @@ -10,7 +10,4 @@ pub mod server; pub mod utils; pub type NetResult = Result; - - - bake_packet_registry!("\\src\\packets\\incoming"); diff --git a/src/lib/net/src/packets/incoming/ack_finish_configuration.rs b/src/lib/net/src/packets/incoming/ack_finish_configuration.rs index 4ea1168f..175bcf69 100644 --- a/src/lib/net/src/packets/incoming/ack_finish_configuration.rs +++ b/src/lib/net/src/packets/incoming/ack_finish_configuration.rs @@ -1,8 +1,8 @@ use crate::packets::IncomingPacket; use crate::NetResult; -use ferrumc_state::ServerState; use ferrumc_events::infrastructure::Event; use ferrumc_macros::{packet, Event, NetDecode}; +use ferrumc_state::ServerState; use std::sync::Arc; #[derive(NetDecode)] @@ -29,4 +29,4 @@ impl AckFinishConfigurationEvent { pub fn new(packet: AckFinishConfigurationPacket, conn_id: usize) -> Self { Self { packet, conn_id } } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/client_information.rs b/src/lib/net/src/packets/incoming/client_information.rs index c4654ecc..5cd7c1a6 100644 --- a/src/lib/net/src/packets/incoming/client_information.rs +++ 
b/src/lib/net/src/packets/incoming/client_information.rs @@ -1,10 +1,10 @@ -use std::sync::Arc; -use tracing::debug; -use ferrumc_state::ServerState; -use ferrumc_macros::{packet, NetDecode}; -use ferrumc_net_codec::net_types::var_int::VarInt; use crate::packets::IncomingPacket; use crate::NetResult; +use ferrumc_macros::{packet, NetDecode}; +use ferrumc_net_codec::net_types::var_int::VarInt; +use ferrumc_state::ServerState; +use std::sync::Arc; +use tracing::debug; #[derive(Debug, NetDecode)] #[packet(packet_id = 0x00, state = "configuration")] @@ -16,7 +16,7 @@ pub struct ClientInformation { pub displayed_skin_parts: u8, pub main_hand: MainHand, pub enable_text_filtering: bool, - pub allow_server_listings: bool + pub allow_server_listings: bool, } #[derive(Debug, NetDecode)] @@ -25,7 +25,7 @@ pub struct ClientInformation { pub enum ChatMode { Enabled, CommandsOnly, - Hidden + Hidden, } #[derive(Debug, NetDecode)] @@ -33,7 +33,7 @@ pub enum ChatMode { #[repr(u8)] pub enum MainHand { Left, - Right + Right, } impl IncomingPacket for ClientInformation { @@ -44,4 +44,4 @@ impl IncomingPacket for ClientInformation { Ok(()) } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/handshake.rs b/src/lib/net/src/packets/incoming/handshake.rs index 39d0569f..ce9b6ae5 100644 --- a/src/lib/net/src/packets/incoming/handshake.rs +++ b/src/lib/net/src/packets/incoming/handshake.rs @@ -1,8 +1,9 @@ use crate::packets::IncomingPacket; -use crate::NetResult; use ferrumc_state::ServerState; +use crate::NetResult; use ferrumc_events::infrastructure::Event; use ferrumc_macros::{packet, Event, NetDecode}; use ferrumc_net_codec::net_types::var_int::VarInt; +use ferrumc_state::ServerState; use std::sync::Arc; use tracing::trace; diff --git a/src/lib/net/src/packets/incoming/login_acknowledged.rs b/src/lib/net/src/packets/incoming/login_acknowledged.rs index ca0e16f3..10c4050f 100644 --- a/src/lib/net/src/packets/incoming/login_acknowledged.rs +++ 
b/src/lib/net/src/packets/incoming/login_acknowledged.rs @@ -1,13 +1,13 @@ -use std::sync::Arc; -use ferrumc_macros::{Event, NetDecode, packet}; -use ferrumc_events::infrastructure::Event; use crate::packets::IncomingPacket; -use crate::NetResult; use ferrumc_state::ServerState; +use crate::NetResult; +use ferrumc_events::infrastructure::Event; +use ferrumc_macros::{packet, Event, NetDecode}; +use ferrumc_state::ServerState; +use std::sync::Arc; #[derive(Debug, NetDecode)] #[packet(packet_id = 0x03, state = "login")] -pub struct LoginAcknowledgedPacket { -} +pub struct LoginAcknowledgedPacket {} #[derive(Event)] pub struct LoginAcknowledgedEvent { @@ -28,4 +28,4 @@ impl LoginAcknowledgedEvent { conn_id, } } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/login_start.rs b/src/lib/net/src/packets/incoming/login_start.rs index 9b189b52..6a9c03d8 100644 --- a/src/lib/net/src/packets/incoming/login_start.rs +++ b/src/lib/net/src/packets/incoming/login_start.rs @@ -1,8 +1,9 @@ -use std::sync::Arc; +use crate::packets::IncomingPacket; +use crate::NetResult; use ferrumc_events::infrastructure::Event; use ferrumc_macros::{packet, Event, NetDecode}; -use crate::packets::IncomingPacket; -use crate::NetResult; use ferrumc_state::ServerState; +use ferrumc_state::ServerState; +use std::sync::Arc; #[derive(Debug, NetDecode)] #[packet(packet_id = 0x00, state = "login")] @@ -31,4 +32,4 @@ impl LoginStartEvent { conn_id, } } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/mod.rs b/src/lib/net/src/packets/incoming/mod.rs index f99e0ee5..2f5254a1 100644 --- a/src/lib/net/src/packets/incoming/mod.rs +++ b/src/lib/net/src/packets/incoming/mod.rs @@ -8,9 +8,9 @@ pub mod server_bound_known_packs; pub mod server_bound_plugin_message; pub mod status_request; -pub mod packet_skeleton; pub mod keep_alive; +pub mod packet_skeleton; pub mod set_player_position; pub mod set_player_position_and_rotation; -pub mod set_player_rotation;
\ No newline at end of file +pub mod set_player_rotation; diff --git a/src/lib/net/src/packets/incoming/ping.rs b/src/lib/net/src/packets/incoming/ping.rs index 5f575041..071dbf72 100644 --- a/src/lib/net/src/packets/incoming/ping.rs +++ b/src/lib/net/src/packets/incoming/ping.rs @@ -1,10 +1,11 @@ -use std::sync::Arc; -use ferrumc_macros::{packet, NetDecode}; -use ferrumc_net_codec::encode::NetEncodeOpts; -use crate::packets::IncomingPacket; -use crate::NetResult; use ferrumc_state::ServerState; use crate::connection::StreamWriter; use crate::packets::outgoing::ping_response::PongPacket; +use crate::packets::IncomingPacket; +use crate::NetResult; +use ferrumc_macros::{packet, NetDecode}; +use ferrumc_net_codec::encode::NetEncodeOpts; +use ferrumc_state::ServerState; +use std::sync::Arc; #[derive(NetDecode, Debug)] #[packet(packet_id = 0x01, state = "status")] @@ -16,12 +17,12 @@ impl IncomingPacket for PingPacket { async fn handle(self, conn_id: usize, state: Arc) -> NetResult<()> { let response = PongPacket::new(self.payload); - let mut writer = state - .universe - .get_mut::(conn_id)?; + let mut writer = state.universe.get_mut::(conn_id)?; - writer.send_packet(&response, &NetEncodeOpts::WithLength).await?; + writer + .send_packet(&response, &NetEncodeOpts::WithLength) + .await?; Ok(()) } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/server_bound_known_packs.rs b/src/lib/net/src/packets/incoming/server_bound_known_packs.rs index ff48f711..2e182d72 100644 --- a/src/lib/net/src/packets/incoming/server_bound_known_packs.rs +++ b/src/lib/net/src/packets/incoming/server_bound_known_packs.rs @@ -1,16 +1,17 @@ -use std::sync::Arc; -use tracing::debug; +use crate::packets::IncomingPacket; +use crate::NetResult; use ferrumc_events::infrastructure::Event; use ferrumc_macros::{packet, Event, NetDecode}; use ferrumc_net_codec::net_types::length_prefixed_vec::LengthPrefixedVec; -use crate::packets::IncomingPacket; -use crate::NetResult; use 
ferrumc_state::ServerState; +use ferrumc_state::ServerState; +use std::sync::Arc; +use tracing::debug; #[derive(Debug, NetDecode)] #[packet(packet_id = 0x07, state = "configuration")] pub struct ServerBoundKnownPacks { #[allow(dead_code)] - pub packs: LengthPrefixedVec + pub packs: LengthPrefixedVec, } #[derive(Debug, NetDecode)] @@ -18,18 +19,16 @@ pub struct ServerBoundKnownPacks { pub struct PackOwned { namespace: String, id: String, - version: String + version: String, } impl IncomingPacket for ServerBoundKnownPacks { - async fn handle(self, conn_id: usize,state : Arc) -> NetResult<()> { + async fn handle(self, conn_id: usize, state: Arc) -> NetResult<()> { //! No clue what this packet is for, but it's not used in the server. //! It's for data packs usually. But we're probably not gonna implement 'em anytime soon. debug!("Received known packs: {:#?}", self); - let event = ServerBoundKnownPacksEvent { - conn_id - }; + let event = ServerBoundKnownPacksEvent { conn_id }; tokio::spawn(ServerBoundKnownPacksEvent::trigger(event, state)); @@ -40,4 +39,4 @@ impl IncomingPacket for ServerBoundKnownPacks { #[derive(Debug, Event)] pub struct ServerBoundKnownPacksEvent { pub conn_id: usize, -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/server_bound_plugin_message.rs b/src/lib/net/src/packets/incoming/server_bound_plugin_message.rs index e3c393aa..a23a532f 100644 --- a/src/lib/net/src/packets/incoming/server_bound_plugin_message.rs +++ b/src/lib/net/src/packets/incoming/server_bound_plugin_message.rs @@ -1,33 +1,30 @@ +use crate::packets::IncomingPacket; +use crate::NetResult; +use ferrumc_macros::packet; +use ferrumc_net_codec::decode::{NetDecode, NetDecodeOpts, NetDecodeResult}; +use ferrumc_state::ServerState; use std::io::Read; use std::sync::Arc; use tracing::debug; -use ferrumc_macros::{packet}; -use ferrumc_net_codec::decode::{NetDecode, NetDecodeOpts, NetDecodeResult}; -use crate::packets::IncomingPacket; -use crate::NetResult; use 
ferrumc_state::ServerState; #[derive(Debug)] #[packet(packet_id = 0x02, state = "configuration")] pub struct ServerBoundPluginMessage { channel: String, - data: Vec + data: Vec, } pub struct ClientMinecraftBrand { - pub brand: String + pub brand: String, } - impl NetDecode for ServerBoundPluginMessage { fn decode(reader: &mut R, opts: &NetDecodeOpts) -> NetDecodeResult { let channel = ::decode(reader, opts)?; let mut buf = Vec::::new(); reader.read_to_end(&mut buf)?; - Ok(Self { - channel, - data: buf - }) + Ok(Self { channel, data: buf }) } } @@ -38,10 +35,12 @@ impl IncomingPacket for ServerBoundPluginMessage { if self.channel == "minecraft:brand" { let brand = String::from_utf8(self.data)?; debug!("Client brand: {}", brand); - - state.universe.add_component(conn_id, ClientMinecraftBrand { brand })?; + + state + .universe + .add_component(conn_id, ClientMinecraftBrand { brand })?; } Ok(()) } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/incoming/status_request.rs b/src/lib/net/src/packets/incoming/status_request.rs index bf7983ef..a5f349d1 100644 --- a/src/lib/net/src/packets/incoming/status_request.rs +++ b/src/lib/net/src/packets/incoming/status_request.rs @@ -1,12 +1,13 @@ use crate::connection::StreamWriter; use crate::packets::outgoing::status_response::StatusResponse; use crate::packets::IncomingPacket; -use crate::NetResult; use ferrumc_state::ServerState; +use crate::NetResult; use ferrumc_config::favicon::get_favicon_base64; use ferrumc_config::statics::get_global_config; use ferrumc_core::identity::player_identity::PlayerIdentity; use ferrumc_macros::{packet, NetDecode}; use ferrumc_net_codec::encode::NetEncodeOpts; +use ferrumc_state::ServerState; use rand::seq::IndexedRandom; use std::sync::Arc; diff --git a/src/lib/net/src/packets/mod.rs b/src/lib/net/src/packets/mod.rs index f698e879..0ca4bc6a 100644 --- a/src/lib/net/src/packets/mod.rs +++ b/src/lib/net/src/packets/mod.rs @@ -6,5 +6,9 @@ pub mod packet_events; 
#[allow(async_fn_in_trait)] pub trait IncomingPacket { - async fn handle(self, conn_id: usize, state: std::sync::Arc) -> NetResult<()>; -} \ No newline at end of file + async fn handle( + self, + conn_id: usize, + state: std::sync::Arc, + ) -> NetResult<()>; +} diff --git a/src/lib/net/src/packets/outgoing/game_event.rs b/src/lib/net/src/packets/outgoing/game_event.rs index fd3c4fc1..19b66809 100644 --- a/src/lib/net/src/packets/outgoing/game_event.rs +++ b/src/lib/net/src/packets/outgoing/game_event.rs @@ -10,10 +10,7 @@ pub struct GameEventPacket { impl GameEventPacket { pub fn new(event_id: u8, value: f32) -> Self { - Self { - event_id, - value, - } + Self { event_id, value } } pub fn start_waiting_for_level_chunks() -> Self { diff --git a/src/lib/net/src/packets/outgoing/mod.rs b/src/lib/net/src/packets/outgoing/mod.rs index 632d043f..9c63db7c 100644 --- a/src/lib/net/src/packets/outgoing/mod.rs +++ b/src/lib/net/src/packets/outgoing/mod.rs @@ -1,16 +1,16 @@ -pub mod status_response; -pub mod ping_response; -pub mod login_success; +pub mod chunk_and_light_data; pub mod client_bound_known_packs; -pub mod registry_data; +pub mod disconnect; pub mod finish_configuration; +pub mod game_event; +pub mod keep_alive; pub mod login_play; +pub mod login_success; +pub mod ping_response; +pub mod registry_data; +pub mod set_center_chunk; pub mod set_default_spawn_position; +pub mod set_render_distance; +pub mod status_response; pub mod synchronize_player_position; -pub mod keep_alive; -pub mod game_event; pub mod update_time; -pub mod disconnect; -pub mod chunk_and_light_data; -pub mod set_center_chunk; -pub mod set_render_distance; \ No newline at end of file diff --git a/src/lib/net/src/packets/outgoing/ping_response.rs b/src/lib/net/src/packets/outgoing/ping_response.rs index ecedc6c3..4f034c56 100644 --- a/src/lib/net/src/packets/outgoing/ping_response.rs +++ b/src/lib/net/src/packets/outgoing/ping_response.rs @@ -9,8 +9,6 @@ pub struct PongPacket { impl PongPacket { 
pub fn new(payload: i64) -> Self { - Self { - payload, - } + Self { payload } } } diff --git a/src/lib/net/src/packets/outgoing/registry_data.rs b/src/lib/net/src/packets/outgoing/registry_data.rs index 1eaaea80..7678f02e 100644 --- a/src/lib/net/src/packets/outgoing/registry_data.rs +++ b/src/lib/net/src/packets/outgoing/registry_data.rs @@ -1206,7 +1206,9 @@ mod tests { let registry_data = RegistryDataPacket::get_registry_packets(); let mut buffer = Vec::new(); for packet in registry_data { - packet.encode(&mut buffer, &NetEncodeOpts::WithLength).unwrap(); + packet + .encode(&mut buffer, &NetEncodeOpts::WithLength) + .unwrap(); } std::fs::write( r#"D:\Minecraft\framework\ferrumc\ferrumc-2_0\ferrumc\.etc/registry.packet"#, diff --git a/src/lib/net/src/packets/outgoing/set_center_chunk.rs b/src/lib/net/src/packets/outgoing/set_center_chunk.rs index a048c472..72eeb7d2 100644 --- a/src/lib/net/src/packets/outgoing/set_center_chunk.rs +++ b/src/lib/net/src/packets/outgoing/set_center_chunk.rs @@ -1,6 +1,6 @@ -use std::io::Write; use ferrumc_macros::{packet, NetEncode}; use ferrumc_net_codec::net_types::var_int::VarInt; +use std::io::Write; #[derive(NetEncode)] #[packet(packet_id = 0x54)] @@ -16,4 +16,4 @@ impl SetCenterChunk { z: VarInt::new(z), } } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/outgoing/set_render_distance.rs b/src/lib/net/src/packets/outgoing/set_render_distance.rs index afbe752a..5992ca70 100644 --- a/src/lib/net/src/packets/outgoing/set_render_distance.rs +++ b/src/lib/net/src/packets/outgoing/set_render_distance.rs @@ -1,6 +1,6 @@ -use std::io::Write; use ferrumc_macros::{packet, NetEncode}; use ferrumc_net_codec::net_types::var_int::VarInt; +use std::io::Write; #[derive(NetEncode)] #[packet(packet_id = 0x55)] @@ -22,4 +22,4 @@ impl SetRenderDistance { distance: VarInt::new(distance as i32), } } -} \ No newline at end of file +} diff --git a/src/lib/net/src/packets/outgoing/status_response.rs 
b/src/lib/net/src/packets/outgoing/status_response.rs index 918354c6..74bfb66d 100644 --- a/src/lib/net/src/packets/outgoing/status_response.rs +++ b/src/lib/net/src/packets/outgoing/status_response.rs @@ -7,11 +7,8 @@ pub struct StatusResponse { pub json_response: String, } - impl StatusResponse { pub fn new(json_response: String) -> Self { - Self { - json_response, - } + Self { json_response } } } diff --git a/src/lib/net/src/packets/packet_events.rs b/src/lib/net/src/packets/packet_events.rs index a8c218e6..0ceb51c5 100644 --- a/src/lib/net/src/packets/packet_events.rs +++ b/src/lib/net/src/packets/packet_events.rs @@ -33,4 +33,4 @@ impl TransformEvent { self.on_ground = Some(on_ground); self } -} \ No newline at end of file +} diff --git a/src/lib/net/src/server.rs b/src/lib/net/src/server.rs index d3b5c91f..fd6c84e4 100644 --- a/src/lib/net/src/server.rs +++ b/src/lib/net/src/server.rs @@ -1,4 +1,4 @@ -use crate::{NetResult}; +use crate::NetResult; use ferrumc_config::statics::get_global_config; use tokio::net::TcpListener; use tracing::{debug, error}; @@ -20,7 +20,6 @@ pub async fn create_server_listener() -> NetResult { return Err(e.into()); } }; - Ok(listener) -} \ No newline at end of file +} diff --git a/src/lib/net/src/utils/broadcast.rs b/src/lib/net/src/utils/broadcast.rs index 6cb14cdd..5aff8f22 100644 --- a/src/lib/net/src/utils/broadcast.rs +++ b/src/lib/net/src/utils/broadcast.rs @@ -1,15 +1,17 @@ -use std::future::Future; -use std::pin::Pin; +use crate::connection::StreamWriter; +use crate::NetResult; use async_trait::async_trait; -use futures::StreamExt; -use tracing::debug; use ferrumc_ecs::entities::Entity; use ferrumc_net_codec::encode::{NetEncode, NetEncodeOpts}; -use crate::NetResult; use ferrumc_state::GlobalState; -use crate::connection::StreamWriter; +use futures::StreamExt; +use std::future::Future; +use std::pin::Pin; +use tracing::debug; -type AsyncCallbackFn = Box Pin + Send + '_>> + Send + Sync>; +type AsyncCallbackFn = Box< + dyn 
Fn(Entity, &GlobalState) -> Pin + Send + '_>> + Send + Sync, +>; type SyncCallbackFn = Box; #[derive(Default)] @@ -35,9 +37,7 @@ impl BroadcastOptions { F: Fn(Entity, &GlobalState) -> Fut + Send + Sync + 'static, Fut: Future + Send + 'static, { - self.async_callback = Some(Box::new(move |entity, state| { - Box::pin(f(entity, state)) - })); + self.async_callback = Some(Box::new(move |entity, state| Box::pin(f(entity, state)))); self } @@ -50,10 +50,17 @@ impl BroadcastOptions { } } -pub async fn broadcast(packet: &impl NetEncode, state: &GlobalState, opts: BroadcastOptions) -> NetResult<()> { +pub async fn broadcast( + packet: &impl NetEncode, + state: &GlobalState, + opts: BroadcastOptions, +) -> NetResult<()> { let entities = match opts.only_entities { - None => state.universe.get_component_manager().get_entities_with::(), - Some(entities) => entities + None => state + .universe + .get_component_manager() + .get_entities_with::(), + Some(entities) => entities, }; // Pre-encode the packet to save resources. 
@@ -64,49 +71,57 @@ pub async fn broadcast(packet: &impl NetEncode, state: &GlobalState, opts: Broad buffer }; - let (state, packet, async_callback, sync_callback) = (state, packet, opts.async_callback, opts.sync_callback); + let (state, packet, async_callback, sync_callback) = + (state, packet, opts.async_callback, opts.sync_callback); futures::stream::iter(entities.into_iter()) - .fold((state, packet, async_callback, sync_callback), move |(state, packet, async_callback, sync_callback), entity| { - async move { - let Ok(mut writer) = state.universe.get_mut::(entity) else { - return (state, packet, async_callback, sync_callback); - }; - - if let Err(e) = writer - .send_packet(&packet, &NetEncodeOpts::None) - .await - { - debug!("Error sending packet: {}", e); - } - - // Execute sync callback first if it exists - if let Some(ref callback) = sync_callback { - callback(entity, state); - } - - // Then execute async callback if it exists - if let Some(ref callback) = async_callback { - callback(entity, state).await; + .fold( + (state, packet, async_callback, sync_callback), + move |(state, packet, async_callback, sync_callback), entity| { + async move { + let Ok(mut writer) = state.universe.get_mut::(entity) else { + return (state, packet, async_callback, sync_callback); + }; + + if let Err(e) = writer.send_packet(&packet, &NetEncodeOpts::None).await { + debug!("Error sending packet: {}", e); + } + + // Execute sync callback first if it exists + if let Some(ref callback) = sync_callback { + callback(entity, state); + } + + // Then execute async callback if it exists + if let Some(ref callback) = async_callback { + callback(entity, state).await; + } + + (state, packet, async_callback, sync_callback) } - - (state, packet, async_callback, sync_callback) - } - }).await; + }, + ) + .await; Ok(()) } - - #[async_trait] pub trait BroadcastToAll { - async fn broadcast(&self, packet: &(impl NetEncode + Sync), opts: BroadcastOptions) -> NetResult<()>; + async fn broadcast( + 
&self, + packet: &(impl NetEncode + Sync), + opts: BroadcastOptions, + ) -> NetResult<()>; } #[async_trait] impl BroadcastToAll for GlobalState { - async fn broadcast(&self, packet: &(impl NetEncode + Sync), opts: BroadcastOptions) -> NetResult<()> { + async fn broadcast( + &self, + packet: &(impl NetEncode + Sync), + opts: BroadcastOptions, + ) -> NetResult<()> { broadcast(packet, self, opts).await } -} \ No newline at end of file +} diff --git a/src/lib/net/src/utils/ecs_helpers.rs b/src/lib/net/src/utils/ecs_helpers.rs index c8dd4c20..4ae80e68 100644 --- a/src/lib/net/src/utils/ecs_helpers.rs +++ b/src/lib/net/src/utils/ecs_helpers.rs @@ -15,4 +15,4 @@ impl EntityExt for usize { fn get_mut(&self, state: &GlobalState) -> ECSResult> { state.universe.get_mut::(*self) } -} \ No newline at end of file +} diff --git a/src/lib/net/src/utils/mod.rs b/src/lib/net/src/utils/mod.rs index 4daafa52..04731bf2 100644 --- a/src/lib/net/src/utils/mod.rs +++ b/src/lib/net/src/utils/mod.rs @@ -1,3 +1,3 @@ -pub mod ecs_helpers; pub mod broadcast; -pub mod state; \ No newline at end of file +pub mod ecs_helpers; +pub mod state; diff --git a/src/lib/storage/src/backends/redb.rs b/src/lib/storage/src/backends/redb.rs index 2f29c854..85e1eb66 100644 --- a/src/lib/storage/src/backends/redb.rs +++ b/src/lib/storage/src/backends/redb.rs @@ -32,12 +32,7 @@ impl DatabaseBackend for RedbBackend { } } - async fn insert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn insert(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { let db = self.db.clone(); if self.exists(table.clone(), key).await? 
{ return Err(StorageError::KeyExists(key)); @@ -124,12 +119,7 @@ impl DatabaseBackend for RedbBackend { .expect("Failed to spawn task") } - async fn update( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn update(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { let db = self.db.clone(); tokio::task::spawn_blocking(move || { let table_def: TableDefinition = TableDefinition::new(&table); @@ -158,12 +148,7 @@ impl DatabaseBackend for RedbBackend { .map_err(|e| StorageError::UpdateError(e.to_string())) } - async fn upsert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result { + async fn upsert(&self, table: String, key: u64, value: Vec) -> Result { let db = self.db.clone(); let result = tokio::task::spawn_blocking(move || { let table_def: TableDefinition = TableDefinition::new(&table); diff --git a/src/lib/storage/src/backends/rocksdb.rs b/src/lib/storage/src/backends/rocksdb.rs index 7f57bda2..469aba82 100644 --- a/src/lib/storage/src/backends/rocksdb.rs +++ b/src/lib/storage/src/backends/rocksdb.rs @@ -34,12 +34,7 @@ impl DatabaseBackend for RocksDBBackend { } } - async fn insert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn insert(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { let db = self.db.clone(); tokio::task::spawn_blocking(move || { let db = db.read(); diff --git a/src/lib/storage/src/backends/sled.rs b/src/lib/storage/src/backends/sled.rs index f96cba25..9b2d7d74 100644 --- a/src/lib/storage/src/backends/sled.rs +++ b/src/lib/storage/src/backends/sled.rs @@ -24,12 +24,7 @@ impl DatabaseBackend for SledBackend { } } - async fn insert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn insert(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { let db = self.db.clone(); tokio::task::spawn_blocking(move || { let table = db @@ -81,12 
+76,7 @@ impl DatabaseBackend for SledBackend { Ok(()) } - async fn update( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn update(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { let db = self.db.clone(); tokio::task::spawn_blocking(move || { let table = db @@ -102,12 +92,7 @@ impl DatabaseBackend for SledBackend { Ok(()) } - async fn upsert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result { + async fn upsert(&self, table: String, key: u64, value: Vec) -> Result { let db = self.db.clone(); let result = tokio::task::spawn_blocking(move || { let table = db diff --git a/src/lib/storage/src/backends/surrealkv.rs b/src/lib/storage/src/backends/surrealkv.rs index 2d118e5f..3587ceb0 100644 --- a/src/lib/storage/src/backends/surrealkv.rs +++ b/src/lib/storage/src/backends/surrealkv.rs @@ -33,12 +33,7 @@ impl DatabaseBackend for SurrealKVBackend { } } - async fn insert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn insert(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { if self.exists(table.clone(), key).await? { return Err(StorageError::KeyExists(key)); } @@ -87,12 +82,7 @@ impl DatabaseBackend for SurrealKVBackend { Ok(()) } - async fn update( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result<(), StorageError> { + async fn update(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError> { if self.exists(table.clone(), key).await? { self.insert(table, key, value).await } else { @@ -100,12 +90,7 @@ impl DatabaseBackend for SurrealKVBackend { } } - async fn upsert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result { + async fn upsert(&self, table: String, key: u64, value: Vec) -> Result { if self.exists(table.clone(), key).await? 
{ self.update(table, key, value).await?; Ok(false) @@ -184,7 +169,7 @@ impl DatabaseBackend for SurrealKVBackend { async fn create_table(&self, _: String) -> Result<(), StorageError> { Ok(()) } - + async fn close(&self) -> Result<(), StorageError> { // I should probably do something here, but I'm just hoping the drop trait will handle it. Ok(()) diff --git a/src/lib/storage/src/benches/compression.rs b/src/lib/storage/src/benches/compression.rs index 6289a887..89acd6be 100644 --- a/src/lib/storage/src/benches/compression.rs +++ b/src/lib/storage/src/benches/compression.rs @@ -51,7 +51,8 @@ fn zlib_decompress(data: &[u8]) { } fn zlib_yazi_compress(data: &[u8]) { - let compressed = yazi::compress(data, yazi::Format::Zlib, yazi::CompressionLevel::Default).unwrap(); + let compressed = + yazi::compress(data, yazi::Format::Zlib, yazi::CompressionLevel::Default).unwrap(); black_box(compressed); } @@ -82,7 +83,9 @@ pub fn compression_benchmarks(c: &mut criterion::Criterion) { compress_group.bench_function("Gzip", |b| b.iter(|| gzip_compress(black_box(data)))); compress_group.bench_function("Deflate", |b| b.iter(|| deflate_compress(black_box(data)))); compress_group.bench_function("Zlib", |b| b.iter(|| zlib_compress(black_box(data)))); - compress_group.bench_function("Zlib (Yazi)", |b| b.iter(|| zlib_yazi_compress(black_box(data)))); + compress_group.bench_function("Zlib (Yazi)", |b| { + b.iter(|| zlib_yazi_compress(black_box(data))) + }); compress_group.bench_function("Brotli", |b| b.iter(|| brotli_compress(black_box(data)))); compress_group.finish(); @@ -153,7 +156,8 @@ pub fn compression_benchmarks(c: &mut criterion::Criterion) { }); roundtrip_group.bench_function("Zlib (Yazi)", |b| { b.iter(|| { - let compressed = yazi::compress(data, yazi::Format::Zlib, yazi::CompressionLevel::Default).unwrap(); + let compressed = + yazi::compress(data, yazi::Format::Zlib, yazi::CompressionLevel::Default).unwrap(); let decompressed = yazi::decompress(compressed.as_slice(), 
yazi::Format::Zlib).unwrap(); black_box(decompressed); }) diff --git a/src/lib/storage/src/compressors/deflate.rs b/src/lib/storage/src/compressors/deflate.rs index 6a236480..c1da1144 100644 --- a/src/lib/storage/src/compressors/deflate.rs +++ b/src/lib/storage/src/compressors/deflate.rs @@ -1,10 +1,8 @@ use crate::errors::StorageError; use std::io::Read; - pub(crate) fn compress_deflate(level: u32, data: &[u8]) -> Result, StorageError> { - let mut compressor = - flate2::read::DeflateEncoder::new(data, flate2::Compression::new(level)); + let mut compressor = flate2::read::DeflateEncoder::new(data, flate2::Compression::new(level)); let mut compressed = Vec::new(); compressor .read_to_end(&mut compressed) @@ -21,11 +19,10 @@ pub(crate) fn decompress_deflate(data: &[u8]) -> Result, StorageError> { Ok(decompressed) } - #[cfg(test)] mod test { - use ferrumc_utils::root; use crate::compressors::{Compressor, CompressorType}; + use ferrumc_utils::root; #[test] fn test_compress_decompress() { diff --git a/src/lib/storage/src/lib.rs b/src/lib/storage/src/lib.rs index 46ac429c..5ee1dd33 100644 --- a/src/lib/storage/src/lib.rs +++ b/src/lib/storage/src/lib.rs @@ -37,8 +37,7 @@ pub trait DatabaseBackend { /// # Returns /// /// A Result containing the inserted key-value pair or a StorageError - async fn insert(&self, table: String, key: u64, value: Vec) - -> Result<(), StorageError>; + async fn insert(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError>; /// Retrieves a value from the database /// @@ -75,8 +74,7 @@ pub trait DatabaseBackend { /// # Returns /// /// A Result containing nothing or a StorageError - async fn update(&self, table: String, key: u64, value: Vec) - -> Result<(), StorageError>; + async fn update(&self, table: String, key: u64, value: Vec) -> Result<(), StorageError>; /// Upserts a key-value pair in the database /// @@ -89,12 +87,7 @@ pub trait DatabaseBackend { /// # Returns /// /// A Result containing a boolean indicating if the key was 
inserted or updated or a StorageError - async fn upsert( - &self, - table: String, - key: u64, - value: Vec, - ) -> Result; + async fn upsert(&self, table: String, key: u64, value: Vec) -> Result; /// Checks if a key exists in the database /// diff --git a/src/lib/text/src/builders.rs b/src/lib/text/src/builders.rs index 829b85f1..ebcbd4ed 100644 --- a/src/lib/text/src/builders.rs +++ b/src/lib/text/src/builders.rs @@ -4,7 +4,7 @@ use paste::paste; /// Build a component (text, translate, keybind). /// pub struct ComponentBuilder { - _private: () + _private: (), } impl ComponentBuilder { @@ -17,7 +17,7 @@ impl ComponentBuilder { pub fn keybind>(keybind: S) -> TextComponent { TextComponent { content: TextContent::Keybind { - keybind: keybind.into() + keybind: keybind.into(), }, ..Default::default() } @@ -74,7 +74,13 @@ impl TextComponentBuilder { } } - make_setters!((Color, color), (Font, font), (String, insertion), (ClickEvent, click_event), (HoverEvent, hover_event)); + make_setters!( + (Color, color), + (Font, font), + (String, insertion), + (ClickEvent, click_event), + (HoverEvent, hover_event) + ); make_bool_setters!(bold, italic, underlined, strikethrough, obfuscated); pub fn space(self) -> Self { @@ -88,9 +94,7 @@ impl TextComponentBuilder { pub fn build(self) -> TextComponent { TextComponent { - content: TextContent::Text { - text: self.text, - }, + content: TextContent::Text { text: self.text }, color: self.color, font: self.font, bold: self.bold, diff --git a/src/lib/text/src/impl.rs b/src/lib/text/src/impl.rs index 5e5f14be..a6c30c24 100644 --- a/src/lib/text/src/impl.rs +++ b/src/lib/text/src/impl.rs @@ -1,22 +1,18 @@ use crate::*; -use ferrumc_net_codec::encode::{ - NetEncode, NetEncodeOpts, errors::NetEncodeError -}; use ferrumc_nbt::{NBTSerializable, NBTSerializeOptions}; +use ferrumc_net_codec::encode::{errors::NetEncodeError, NetEncode, NetEncodeOpts}; +use paste::paste; +use std::fmt; use std::io::Write; use std::marker::Unpin; -use 
tokio::io::AsyncWriteExt; -use std::fmt; use std::ops::Add; use std::str::FromStr; -use paste::paste; +use tokio::io::AsyncWriteExt; impl From for TextComponent { fn from(value: String) -> Self { Self { - content: TextContent::Text { - text: value, - }, + content: TextContent::Text { text: value }, ..Default::default() } } @@ -25,9 +21,7 @@ impl From for TextComponent { impl From<&str> for TextComponent { fn from(value: &str) -> Self { Self { - content: TextContent::Text { - text: value.into(), - }, + content: TextContent::Text { text: value.into() }, ..Default::default() } } @@ -86,7 +80,13 @@ impl fmt::Display for TextComponent { } impl TextComponent { - make_setters!((Color, color), (Font, font), (String, insertion), (ClickEvent, click_event), (HoverEvent, hover_event)); + make_setters!( + (Color, color), + (Font, font), + (String, insertion), + (ClickEvent, click_event), + (HoverEvent, hover_event) + ); make_bool_setters!(bold, italic, underlined, strikethrough, obfuscated); pub fn serialize_nbt(&self) -> Vec { @@ -102,7 +102,11 @@ impl NetEncode for TextComponent { Ok(()) } - async fn encode_async(&self, writer: &mut W, _: &NetEncodeOpts) -> Result<(), NetEncodeError>{ + async fn encode_async( + &self, + writer: &mut W, + _: &NetEncodeOpts, + ) -> Result<(), NetEncodeError> { writer.write_all(&self.serialize_nbt()[..]).await?; Ok(()) } diff --git a/src/lib/text/src/lib.rs b/src/lib/text/src/lib.rs index 0434b8bc..36e06cb9 100644 --- a/src/lib/text/src/lib.rs +++ b/src/lib/text/src/lib.rs @@ -1,12 +1,12 @@ use ferrumc_macros::NBTSerialize; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[cfg(test)] mod tests; -mod utils; mod builders; mod r#impl; +mod utils; pub use builders::*; pub use utils::*; diff --git a/src/lib/text/src/tests.rs b/src/lib/text/src/tests.rs index c33baed7..1ee4b05a 100644 --- a/src/lib/text/src/tests.rs +++ b/src/lib/text/src/tests.rs @@ -18,9 +18,7 @@ fn bytes_to_readable_string(bytes: &[u8]) -> String { fn 
bytes_to_string(bytes: &[u8]) -> String { bytes .iter() - .map(|&byte| { - format!("{:02X}", byte) - }) + .map(|&byte| format!("{:02X}", byte)) .collect::>() .join(" ") } @@ -29,24 +27,22 @@ fn bytes_to_string(bytes: &[u8]) -> String { fn test_to_string() { let component = TextComponent::from("This is a test!"); assert_eq!( - component.to_string(), + component.to_string(), "{\"text\":\"This is a test!\"}".to_string() ); let component = ComponentBuilder::text("This is a test!") .color(NamedColor::Blue) .build(); assert_eq!( - component.to_string(), + component.to_string(), "{\"text\":\"This is a test!\",\"color\":\"blue\"}".to_string() ); let component = ComponentBuilder::keybind("key.jump"); assert_eq!( - component.to_string(), + component.to_string(), "{\"keybind\":\"key.jump\"}".to_string() ); - let component = - TextComponent::from("This is a test!") - + TextComponent::from(" extra!"); + let component = TextComponent::from("This is a test!") + TextComponent::from(" extra!"); assert_eq!( component.to_string(), "{\"text\":\"This is a test!\",\"extra\":[{\"text\":\" extra!\"}]}".to_string() @@ -55,21 +51,20 @@ fn test_to_string() { .hover_event(HoverEvent::ShowText(Box::new(TextComponent::from("boo")))) .build(); assert_eq!( - component.to_string(), - ("This is a test!".into_text() - .on_hover_show_text("boo")) - .to_string() + component.to_string(), + ("This is a test!".into_text().on_hover_show_text("boo")).to_string() ); let component = ComponentBuilder::text("This is a test!") .underlined() .hover_event(HoverEvent::ShowText(Box::new(TextComponent::from("boo")))) .build(); assert_eq!( - component.to_string(), - ("This is a test!".into_text() + component.to_string(), + ("This is a test!" 
+ .into_text() .underlined() .on_hover_show_text("boo")) - .to_string() + .to_string() ); let component = ComponentBuilder::text("This is a test!") .underlined() @@ -77,31 +72,27 @@ fn test_to_string() { .hover_event(HoverEvent::ShowText(Box::new(TextComponent::from("boo")))) .build(); assert_eq!( - component.to_string(), + component.to_string(), ("This is a test!" .underlined() .bold() .on_hover_show_text("boo")) - .to_string() + .to_string() ); let component = ComponentBuilder::keybind("key.jump"); - assert_eq!( - component.to_string(), - Text::keybind("key.jump").to_string() - ); - + assert_eq!(component.to_string(), Text::keybind("key.jump").to_string()); } -use std::io::{Cursor, Write}; -use ferrumc_macros::{NetEncode, packet}; +use ferrumc_macros::{packet, NetEncode}; +use ferrumc_nbt::NBTSerializable; +use ferrumc_nbt::NBTSerializeOptions; use ferrumc_net_codec::{ - encode::{NetEncode, NetEncodeOpts}, decode::{NetDecode, NetDecodeOpts}, - net_types::var_int::VarInt + encode::{NetEncode, NetEncodeOpts}, + net_types::var_int::VarInt, }; -use ferrumc_nbt::NBTSerializable; -use ferrumc_nbt::NBTSerializeOptions; use std::fs::File; +use std::io::{Cursor, Write}; #[derive(NetEncode)] #[packet(packet_id = 0x6C)] @@ -113,24 +104,30 @@ struct TestPacket { #[tokio::test] #[ignore] async fn test_serialize_to_nbt() { - let component = ComponentBuilder::translate("chat.type.text", vec![ - ComponentBuilder::text("GStudiosX") - .click_event(ClickEvent::SuggestCommand("/msg GStudiosX".to_string())) - .hover_event(HoverEvent::ShowEntity { - entity_type: "minecraft:player".to_string(), - id: uuid::Uuid::new_v4(), - name: Some("GStudiosX".to_string()), - }) - .color(NamedColor::Blue) - .build(), - ComponentBuilder::text("Hi") - .font("custom:test") - .extra(ComponentBuilder::keybind("key.jump")) - .build(), - ]); + let component = ComponentBuilder::translate( + "chat.type.text", + vec![ + ComponentBuilder::text("GStudiosX") + .click_event(ClickEvent::SuggestCommand("/msg 
GStudiosX".to_string())) + .hover_event(HoverEvent::ShowEntity { + entity_type: "minecraft:player".to_string(), + id: uuid::Uuid::new_v4(), + name: Some("GStudiosX".to_string()), + }) + .color(NamedColor::Blue) + .build(), + ComponentBuilder::text("Hi") + .font("custom:test") + .extra(ComponentBuilder::keybind("key.jump")) + .build(), + ], + ); //println!("{:#?}", component.color); println!("{}", component); - println!("{}", bytes_to_readable_string(&component.serialize_nbt()[..])); + println!( + "{}", + bytes_to_readable_string(&component.serialize_nbt()[..]) + ); println!("{}", component.serialize_nbt().len()); @@ -138,18 +135,28 @@ async fn test_serialize_to_nbt() { let mut file = File::create("foo.nbt").unwrap(); let mut bytes = Vec::new(); - NBTSerializable::serialize(&vec![component.clone()], &mut bytes, &NBTSerializeOptions::Network); + NBTSerializable::serialize( + &vec![component.clone()], + &mut bytes, + &NBTSerializeOptions::Network, + ); //file.write_all(&bytes).unwrap(); println!("\n{}\n", bytes_to_readable_string(&bytes[..])); file.write_all(&component.serialize_nbt()[..]).unwrap(); let mut cursor = Cursor::new(Vec::new()); - TestPacket::encode_async(&TestPacket { - message: TextComponentBuilder::new("test") - .color(NamedColor::Blue) - .build(), - overlay: false, - }, &mut cursor, &NetEncodeOpts::WithLength).await.unwrap(); + TestPacket::encode_async( + &TestPacket { + message: TextComponentBuilder::new("test") + .color(NamedColor::Blue) + .build(), + overlay: false, + }, + &mut cursor, + &NetEncodeOpts::WithLength, + ) + .await + .unwrap(); println!("\n{}\n", bytes_to_string(&cursor.get_ref()[..])); @@ -160,6 +167,14 @@ async fn test_serialize_to_nbt() { println!("{}\n", bytes_to_string(&component.serialize_nbt()[..])); - println!("id: {}, length: {}, left: {}", id.val, length.val, length.val as u64 - cursor.position()); - println!("{}", bytes_to_readable_string(&cursor.get_ref()[cursor.position() as usize..])); + println!( + "id: {}, length: {}, 
left: {}", + id.val, + length.val, + length.val as u64 - cursor.position() + ); + println!( + "{}", + bytes_to_readable_string(&cursor.get_ref()[cursor.position() as usize..]) + ); } diff --git a/src/lib/text/src/utils.rs b/src/lib/text/src/utils.rs index 92058166..20912952 100644 --- a/src/lib/text/src/utils.rs +++ b/src/lib/text/src/utils.rs @@ -1,6 +1,6 @@ use crate::*; -use serde::{Serialize, Deserialize}; use ferrumc_macros::NBTSerialize; +use serde::{Deserialize, Serialize}; #[macro_export] macro_rules! make_bool_setters { @@ -119,7 +119,11 @@ impl From<&str> for Font { /// The click event of the text component /// #[derive(Clone, PartialEq, Debug, Serialize, Deserialize, NBTSerialize)] -#[serde(tag = "action", content = "value", rename_all(serialize = "snake_case"))] +#[serde( + tag = "action", + content = "value", + rename_all(serialize = "snake_case") +)] #[nbt(tag = "action", content = "value", rename_all = "snake_case")] pub enum ClickEvent { /// Opens an URL @@ -142,7 +146,11 @@ pub enum ClickEvent { /// The hover event of the text component /// #[derive(Clone, PartialEq, Debug, Serialize, Deserialize, NBTSerialize)] -#[serde(tag = "action", content = "contents", rename_all(serialize = "snake_case"))] +#[serde( + tag = "action", + content = "contents", + rename_all(serialize = "snake_case") +)] #[nbt(tag = "action", content = "contents", rename_all = "snake_case")] pub enum HoverEvent { ShowText(Box), diff --git a/src/lib/utils/config/src/favicon.rs b/src/lib/utils/config/src/favicon.rs index 3feb4b97..efff275d 100644 --- a/src/lib/utils/config/src/favicon.rs +++ b/src/lib/utils/config/src/favicon.rs @@ -1,8 +1,8 @@ -use std::fs::File; -use std::io::Read; use base64::Engine; -use lazy_static::lazy_static; use ferrumc_general_purpose::paths::get_root_path; +use lazy_static::lazy_static; +use std::fs::File; +use std::io::Read; const BAKED_FAVICON: &[u8] = include_bytes!("../../../../../icon-64.png"); @@ -11,7 +11,6 @@ lazy_static! 
{ let encoded = base64::engine::general_purpose::STANDARD.encode(BAKED_FAVICON); format!("data:image/png;base64,{}", encoded) }; - static ref CUSTOM_FAVICON: Option = { let icon_path = get_root_path().join("icon.png"); if icon_path.exists() { @@ -27,7 +26,10 @@ lazy_static! { eprintln!("Could not read custom favicon file: {}", e); return None; } - let res = format!("data:image/png;base64,{}", base64::engine::general_purpose::STANDARD.encode(icon)); + let res = format!( + "data:image/png;base64,{}", + base64::engine::general_purpose::STANDARD.encode(icon) + ); Some(res) } else { None diff --git a/src/lib/utils/config/src/lib.rs b/src/lib/utils/config/src/lib.rs index 4469456f..4e053f16 100644 --- a/src/lib/utils/config/src/lib.rs +++ b/src/lib/utils/config/src/lib.rs @@ -2,7 +2,7 @@ //! # FerrumC Configuration Utilities //! //! This crate provides utilities for reading and storing server configurations. -//! +//! //! ## Organization //! //! The crate is organized into the following modules: @@ -11,9 +11,10 @@ //! - [statics](statics/index.html): Static global configuration and related functions. pub mod errors; +pub mod favicon; pub mod server_config; +pub mod setup; pub mod statics; -pub mod favicon; // Re-exports pub use server_config::DatabaseConfig; diff --git a/src/lib/utils/config/src/server_config.rs b/src/lib/utils/config/src/server_config.rs index ddbf00e6..89fdeb4f 100644 --- a/src/lib/utils/config/src/server_config.rs +++ b/src/lib/utils/config/src/server_config.rs @@ -4,8 +4,6 @@ use serde_derive::{Deserialize, Serialize}; - - /// The server configuration struct. /// /// Fields: @@ -33,7 +31,7 @@ pub struct ServerConfig { /// /// Fields: /// - `cache_size`: The cache size in KB. -/// - `compression` - Which compression algorithm to use. Options are `brotli`, `deflate`, `gzip`, `zlib` +/// - `compression` - Which compression algorithm to use. Options are `brotli`, `deflate`, `gzip`, `zlib` /// and `zstd` /// - `backend` - Which database backend to use. 
Options are `redb`, `rocksdb`, `sled`, `surrealkv`. /// - `world_path`: The path to the world database. @@ -65,4 +63,4 @@ pub enum DatabaseCompression { Fast, #[serde(rename = "best")] Best, -} \ No newline at end of file +} diff --git a/src/lib/utils/config/src/setup.rs b/src/lib/utils/config/src/setup.rs new file mode 100644 index 00000000..b582e0a1 --- /dev/null +++ b/src/lib/utils/config/src/setup.rs @@ -0,0 +1,41 @@ +use crate::statics::DEFAULT_CONFIG; +use ferrumc_general_purpose::paths::get_root_path; +use std::fs::File; +use std::io::Write; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum SetupError { + #[error("Could not write the config file: {0}")] + WriteError(std::io::Error), + #[error("Could not read the config file: {0}")] + ReadError(std::io::Error), + #[error("IO error: {0}")] + IoError(std::io::Error), +} + +impl From for SetupError { + fn from(e: std::io::Error) -> Self { + match e.kind() { + std::io::ErrorKind::NotFound => SetupError::ReadError(e), + std::io::ErrorKind::PermissionDenied => SetupError::WriteError(e), + std::io::ErrorKind::AlreadyExists => SetupError::WriteError(e), + _ => SetupError::IoError(e), + } + } +} + +pub fn setup() -> Result<(), SetupError> { + if std::fs::exists(get_root_path().join("config.toml"))? { + return Ok(()); + } + let mut config_file = File::create(get_root_path().join("config.toml"))?; + config_file.write_all(DEFAULT_CONFIG.as_bytes())?; + if !std::fs::exists(get_root_path().join("import"))? { + std::fs::create_dir(get_root_path().join("import"))?; + } + if !std::fs::exists(get_root_path().join("world"))? { + std::fs::create_dir(get_root_path().join("world"))?; + } + Ok(()) +} diff --git a/src/lib/utils/config/src/statics.rs b/src/lib/utils/config/src/statics.rs index 8c18454c..1601370f 100644 --- a/src/lib/utils/config/src/statics.rs +++ b/src/lib/utils/config/src/statics.rs @@ -2,16 +2,16 @@ //! //! Contains the static global configuration and its related functions. 
+use crate::server_config::ServerConfig; +use ferrumc_general_purpose::paths::get_root_path; +use lazy_static::lazy_static; use std::fs::File; use std::io::{Read, Write}; use std::process::exit; -use crate::server_config::ServerConfig; -use lazy_static::lazy_static; use tracing::{error, info}; -use ferrumc_general_purpose::paths::get_root_path; /// The default server configuration that is stored in memory. -const DEFAULT_CONFIG: &str = include_str!("../../../../../.etc/example-config.toml"); +pub(crate) const DEFAULT_CONFIG: &str = include_str!("../../../../../.etc/example-config.toml"); lazy_static! { /// The server configuration that is stored in memory. @@ -29,7 +29,7 @@ fn create_config() -> ServerConfig { }; let mut config_str = String::new(); if let Err(e) = file.read_to_string(&mut config_str) { - error!("Could not read configuration file: {}",e ); + error!("Could not read configuration file: {}", e); exit(1); } else { if config_str.is_empty() { @@ -45,7 +45,10 @@ fn create_config() -> ServerConfig { } } } else { - info!("Configuration file not found. Making a default configuration at {}", config_location.display()); + info!( + "Configuration file not found. Making a default configuration at {}", + config_location.display() + ); let default_config = DEFAULT_CONFIG; // write to the config file let mut file = match File::create(config_location) { @@ -74,6 +77,3 @@ fn create_config() -> ServerConfig { pub fn get_global_config() -> &'static ServerConfig { &CONFIG } - - - diff --git a/src/lib/utils/general_purpose/src/hashing/mod.rs b/src/lib/utils/general_purpose/src/hashing/mod.rs index 62fdb33f..5205b309 100644 --- a/src/lib/utils/general_purpose/src/hashing/mod.rs +++ b/src/lib/utils/general_purpose/src/hashing/mod.rs @@ -1,5 +1,5 @@ -use std::hash::{Hash, Hasher}; use fnv::FnvHasher; +use std::hash::{Hash, Hasher}; /// ### DO NOT USE THIS FOR SECURITY PURPOSES /// This is a very simple hashing function that is not secure at all. 
It is only meant to be used @@ -21,7 +21,10 @@ mod tests { let hash1 = hash(data); let hash2 = hash(data); - assert_eq!(hash1, hash2, "FNV should produce consistent results for the same input"); + assert_eq!( + hash1, hash2, + "FNV should produce consistent results for the same input" + ); } #[test] @@ -31,14 +34,20 @@ mod tests { let hash1 = hash(data1); let hash2 = hash(data2); - assert_ne!(hash1, hash2, "FNV should produce different results for different inputs"); + assert_ne!( + hash1, hash2, + "FNV should produce different results for different inputs" + ); } - + #[test] fn test_specific_output() { let data = "hello_world"; let hash = hash(data); - assert_eq!(hash, 0x768aff4672817d95, "FNV should produce a specific output for a specific input"); + assert_eq!( + hash, 0x768aff4672817d95, + "FNV should produce a specific output for a specific input" + ); } -} \ No newline at end of file +} diff --git a/src/lib/utils/general_purpose/src/lib.rs b/src/lib/utils/general_purpose/src/lib.rs index 99524994..3dc1a972 100644 --- a/src/lib/utils/general_purpose/src/lib.rs +++ b/src/lib/utils/general_purpose/src/lib.rs @@ -1,3 +1,3 @@ +pub mod hashing; pub mod paths; pub mod simd; -pub mod hashing; \ No newline at end of file diff --git a/src/lib/utils/general_purpose/src/paths/mod.rs b/src/lib/utils/general_purpose/src/paths/mod.rs index d1e78288..c33c4fbd 100644 --- a/src/lib/utils/general_purpose/src/paths/mod.rs +++ b/src/lib/utils/general_purpose/src/paths/mod.rs @@ -1,5 +1,5 @@ -use std::path::PathBuf; use std::env::current_exe; +use std::path::PathBuf; #[derive(thiserror::Error, Debug)] pub enum RootPathError { @@ -31,7 +31,10 @@ pub fn get_root_path() -> PathBuf { //! ``` //! 
let exe_location = current_exe().expect("Failed to get the current executable location."); - let exe_dir = exe_location.parent().ok_or(RootPathError::NoParent).expect("Failed to get the parent directory of the executable."); + let exe_dir = exe_location + .parent() + .ok_or(RootPathError::NoParent) + .expect("Failed to get the parent directory of the executable."); exe_dir.to_path_buf() } @@ -43,14 +46,15 @@ impl BetterPathExt for PathBuf { fn better_display(&self) -> String { //! Returns a string representation of the path that is more readable. //!
- //! e.g. + //! e.g. //! If the path is `D:\\server\\world\\region\\r.0.0.mca`, //!
//! -> `D:/server/world/region/r.0.0.mca`. - let path = self.to_string_lossy() - .replace(r"\\?\", "") // Remove Windows extended path prefix - .replace(r"\\", r"\"); // Normalize backslashes - + let path = self + .to_string_lossy() + .replace(r"\\?\", "") // Remove Windows extended path prefix + .replace(r"\\", r"\"); // Normalize backslashes + format!("`{}`", path) } -} \ No newline at end of file +} diff --git a/src/lib/utils/general_purpose/src/simd.rs b/src/lib/utils/general_purpose/src/simd.rs index 4377b4de..a8386d66 100644 --- a/src/lib/utils/general_purpose/src/simd.rs +++ b/src/lib/utils/general_purpose/src/simd.rs @@ -1,2 +1 @@ - -pub mod arrays; \ No newline at end of file +pub mod arrays; diff --git a/src/lib/utils/logging/src/lib.rs b/src/lib/utils/logging/src/lib.rs index 3d1d2149..17afe9a5 100644 --- a/src/lib/utils/logging/src/lib.rs +++ b/src/lib/utils/logging/src/lib.rs @@ -2,21 +2,20 @@ pub mod errors; use ferrumc_profiling::ProfilerTracingLayer; use tracing::Level; -use tracing_subscriber::EnvFilter; use tracing_subscriber::fmt::Layer; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::EnvFilter; pub fn init_logging(trace_level: tracing::Level) { - - let env_filter = EnvFilter::from_default_env() - .add_directive(trace_level.into()); + let env_filter = EnvFilter::from_default_env().add_directive(trace_level.into()); let mut fmt_layer = Layer::default(); // remove path from logs if log level is INFO if trace_level == Level::INFO { - fmt_layer = fmt_layer.with_target(false) + fmt_layer = fmt_layer + .with_target(false) .with_thread_ids(false) .with_thread_names(false); } diff --git a/src/lib/world/src/db_functions.rs b/src/lib/world/src/db_functions.rs index 986ae5fb..18a06fd9 100644 --- a/src/lib/world/src/db_functions.rs +++ b/src/lib/world/src/db_functions.rs @@ -1,21 +1,21 @@ -use ferrumc_storage::compressors::Compressor; use crate::chunk_format::Chunk; use 
crate::errors::WorldError; use crate::World; +use ferrumc_storage::compressors::Compressor; impl World { pub async fn save_chunk(&self, chunk: Chunk) -> Result<(), WorldError> { save_chunk_internal(self, chunk).await } - + pub async fn load_chunk(&self, x: i32, z: i32) -> Result { load_chunk_internal(self, &self.compressor, x, z).await } - + pub async fn chunk_exists(&self, x: i32, z: i32) -> Result { chunk_exists_internal(self, x, z).await } - + pub async fn delete_chunk(&self, x: i32, z: i32) -> Result<(), WorldError> { delete_chunk_internal(self, x, z).await } @@ -23,19 +23,22 @@ impl World { pub async fn sync(&self) -> Result<(), WorldError> { sync_internal(self).await } - - pub async fn load_chunk_batch(&self, coords: Vec<(i32, i32)>) -> Result, WorldError> { + + pub async fn load_chunk_batch( + &self, + coords: Vec<(i32, i32)>, + ) -> Result, WorldError> { load_chunk_batch_internal(self, coords).await } } -pub(crate) async fn save_chunk_internal( - world: &World, - chunk: Chunk, -) -> Result<(), WorldError> { +pub(crate) async fn save_chunk_internal(world: &World, chunk: Chunk) -> Result<(), WorldError> { let as_bytes = world.compressor.compress(&bitcode::encode(&chunk))?; let digest = ferrumc_general_purpose::hashing::hash((chunk.x, chunk.z)); - world.storage_backend.upsert("chunks".to_string(), digest, as_bytes).await?; + world + .storage_backend + .upsert("chunks".to_string(), digest, as_bytes) + .await?; Ok(()) } @@ -46,10 +49,15 @@ pub(crate) async fn load_chunk_internal( z: i32, ) -> Result { let digest = ferrumc_general_purpose::hashing::hash((x, z)); - match world.storage_backend.get("chunks".to_string(), digest).await? { + match world + .storage_backend + .get("chunks".to_string(), digest) + .await? 
+ { Some(compressed) => { let data = compressor.decompress(&compressed)?; - let chunk: Chunk = bitcode::decode(&data).map_err(|e| WorldError::BitcodeDecodeError(e.to_string()))?; + let chunk: Chunk = bitcode::decode(&data) + .map_err(|e| WorldError::BitcodeDecodeError(e.to_string()))?; Ok(chunk) } None => Err(WorldError::ChunkNotFound), @@ -60,17 +68,25 @@ pub(crate) async fn load_chunk_batch_internal( world: &World, coords: Vec<(i32, i32)>, ) -> Result, WorldError> { - let digests = coords.into_iter().map(|(x, z)| ferrumc_general_purpose::hashing::hash((x, z))).collect(); - world.storage_backend.batch_get("chunks".to_string(), digests).await?.iter().map(|chunk| { - match chunk { + let digests = coords + .into_iter() + .map(|(x, z)| ferrumc_general_purpose::hashing::hash((x, z))) + .collect(); + world + .storage_backend + .batch_get("chunks".to_string(), digests) + .await? + .iter() + .map(|chunk| match chunk { Some(compressed) => { let data = world.compressor.decompress(compressed)?; - let chunk: Chunk = bitcode::decode(&data).map_err(|e| WorldError::BitcodeDecodeError(e.to_string()))?; + let chunk: Chunk = bitcode::decode(&data) + .map_err(|e| WorldError::BitcodeDecodeError(e.to_string()))?; Ok(chunk) } None => Err(WorldError::ChunkNotFound), - } - }).collect() + }) + .collect() } pub(crate) async fn chunk_exists_internal( @@ -79,20 +95,22 @@ pub(crate) async fn chunk_exists_internal( z: i32, ) -> Result { let digest = ferrumc_general_purpose::hashing::hash((x, z)); - Ok(world.storage_backend.exists("chunks".to_string(), digest).await?) + Ok(world + .storage_backend + .exists("chunks".to_string(), digest) + .await?) 
} -pub(crate) async fn delete_chunk_internal( - world: &World, - x: i32, - z: i32, -) -> Result<(), WorldError> { +pub(crate) async fn delete_chunk_internal(world: &World, x: i32, z: i32) -> Result<(), WorldError> { let digest = ferrumc_general_purpose::hashing::hash((x, z)); - world.storage_backend.delete("chunks".to_string(), digest).await?; + world + .storage_backend + .delete("chunks".to_string(), digest) + .await?; Ok(()) } pub(crate) async fn sync_internal(world: &World) -> Result<(), WorldError> { world.storage_backend.flush().await?; Ok(()) -} \ No newline at end of file +} diff --git a/src/lib/world/src/errors.rs b/src/lib/world/src/errors.rs index 2554ad0e..d745d94f 100644 --- a/src/lib/world/src/errors.rs +++ b/src/lib/world/src/errors.rs @@ -1,10 +1,10 @@ use crate::errors::WorldError::{GenericIOError, PermissionError}; -use std::io::ErrorKind; -use thiserror::Error; +use crate::vanilla_chunk_format::Palette; use errors::AnvilError; use ferrumc_anvil::errors; use ferrumc_storage::errors::StorageError; -use crate::vanilla_chunk_format::Palette; +use std::io::ErrorKind; +use thiserror::Error; #[derive(Debug, Error)] pub enum WorldError { @@ -58,4 +58,4 @@ impl From for WorldError { fn from(err: errors::AnvilError) -> Self { WorldError::AnvilDecodeError(err) } -} \ No newline at end of file +} diff --git a/src/lib/world/src/importing.rs b/src/lib/world/src/importing.rs index e10ef25b..6604e803 100644 --- a/src/lib/world/src/importing.rs +++ b/src/lib/world/src/importing.rs @@ -1,23 +1,26 @@ -use std::path::PathBuf; -use std::sync::Arc; -use std::sync::atomic::AtomicU64; -use indicatif::ProgressBar; -use rayon::prelude::*; -use tokio::task::JoinSet; -use tracing::{error, info}; +use crate::db_functions::save_chunk_internal; use crate::errors::WorldError; +use crate::vanilla_chunk_format::VanillaChunk; use crate::World; use ferrumc_anvil::load_anvil_file; use ferrumc_general_purpose::paths::BetterPathExt; -use crate::db_functions::save_chunk_internal; -use 
crate::vanilla_chunk_format::VanillaChunk; +use indicatif::ProgressBar; +use rayon::prelude::*; +use std::path::PathBuf; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; +use tokio::task::JoinSet; +use tracing::{error, info}; /// This function is used to check if the import path is valid. It checks if the path exists, if it /// is a file, if the region folder exists, if the region folder is a file, and if the region folder /// is empty. fn check_paths_validity(import_dir: PathBuf) -> Result<(), WorldError> { if !import_dir.exists() { - error!("Import path does not exist: {}", import_dir.better_display()); + error!( + "Import path does not exist: {}", + import_dir.better_display() + ); return Err(WorldError::InvalidImportPath(import_dir.better_display())); } if import_dir.is_file() { @@ -27,45 +30,60 @@ fn check_paths_validity(import_dir: PathBuf) -> Result<(), WorldError> { if let Ok(dir) = import_dir.read_dir() { if dir.count() == 0 { - error!("Import path's region folder is empty: {}", import_dir.better_display()); + error!( + "Import path's region folder is empty: {}", + import_dir.better_display() + ); return Err(WorldError::NoRegionFiles); } } else { - error!("Could not read import path's region folder: {}", import_dir.better_display()); + error!( + "Could not read import path's region folder: {}", + import_dir.better_display() + ); return Err(WorldError::InvalidImportPath(import_dir.better_display())); } Ok(()) } impl World { - fn get_chunk_count(&self, import_dir: PathBuf) -> Result { info!("Counting chunks in import directory..."); let regions_dir = import_dir.join("region").read_dir()?; let chunk_count = AtomicU64::new(0); - regions_dir.into_iter().par_bridge().for_each(|region_file| { - match region_file { + regions_dir + .into_iter() + .par_bridge() + .for_each(|region_file| match region_file { Ok(dir_entry) => { if dir_entry.path().is_dir() { - error!("Region file is a directory: {}", dir_entry.path().to_string_lossy()); + error!( + "Region file 
is a directory: {}", + dir_entry.path().to_string_lossy() + ); } else { let file_path = dir_entry.path(); let Ok(anvil_file) = load_anvil_file(file_path.clone()) else { - error!("Could not load region file: {}", file_path.clone().display()); + error!( + "Could not load region file: {}", + file_path.clone().display() + ); return; }; let locations = anvil_file.get_locations(); - chunk_count.fetch_add(locations.len() as u64, std::sync::atomic::Ordering::Relaxed); + chunk_count.fetch_add( + locations.len() as u64, + std::sync::atomic::Ordering::Relaxed, + ); } } Err(e) => { error!("Could not read region file: {}", e); } - } - }); + }); Ok(chunk_count.load(std::sync::atomic::Ordering::Relaxed)) } - + pub async fn import(&mut self, import_dir: PathBuf, _: PathBuf) -> Result<(), WorldError> { // Check if the import path is valid. We can assume the database path is valid since we // checked it in the config validity check. @@ -79,11 +97,17 @@ impl World { match region_file { Ok(dir_entry) => { if dir_entry.path().is_dir() { - error!("Region file is a directory: {}", dir_entry.path().to_string_lossy()); + error!( + "Region file is a directory: {}", + dir_entry.path().to_string_lossy() + ); } else { let file_path = dir_entry.path(); let Ok(anvil_file) = load_anvil_file(file_path.clone()) else { - error!("Could not load region file: {}", file_path.clone().display()); + error!( + "Could not load region file: {}", + file_path.clone().display() + ); continue; }; let locations = anvil_file.get_locations(); @@ -110,18 +134,22 @@ impl World { } } } - }; + } } } Err(e) => { error!("Could not read region file: {}", e); } } - }; + } while task_set.join_next().await.is_some() {} self.sync().await?; progress_bar.clone().finish(); - info!("Imported {} chunks in {:?}", progress_bar.clone().position(), start.elapsed()); + info!( + "Imported {} chunks in {:?}", + progress_bar.clone().position(), + start.elapsed() + ); Ok(()) } } diff --git a/src/lib/world/src/lib.rs 
b/src/lib/world/src/lib.rs index 01065663..355f0ce5 100644 --- a/src/lib/world/src/lib.rs +++ b/src/lib/world/src/lib.rs @@ -1,10 +1,12 @@ +pub mod chunk_format; +mod db_functions; pub mod errors; mod importing; mod vanilla_chunk_format; -mod db_functions; -pub mod chunk_format; use crate::errors::WorldError; +use ferrumc_config::statics::get_global_config; +use ferrumc_general_purpose::paths::get_root_path; use ferrumc_storage::compressors::Compressor; use ferrumc_storage::DatabaseBackend; use std::path::{Path, PathBuf}; @@ -12,8 +14,6 @@ use std::process::exit; use std::sync::Arc; use tokio::fs::create_dir_all; use tracing::{error, info, warn}; -use ferrumc_config::statics::get_global_config; -use ferrumc_general_purpose::paths::get_root_path; #[derive(Clone)] pub struct World { @@ -73,7 +73,7 @@ async fn check_config_validity() -> Result<(), WorldError> { impl World { /// Creates a new world instance. - /// + /// /// You'd probably want to call this at the start of your program. And then use the returned /// in a state struct or something. pub async fn new() -> Self { @@ -87,81 +87,83 @@ impl World { if backend_path.is_relative() { backend_path = get_root_path().join(backend_path); } - let storage_backend: Result, WorldError> = match backend_string - .to_lowercase() - .as_str() - { - "surrealkv" => { - #[cfg(feature = "surrealkv")] - match ferrumc_storage::backends::surrealkv::SurrealKVBackend::initialize(Some( - backend_path, - )) - .await - { - Ok(backend) => Ok(Box::new(backend)), - Err(e) => Err(WorldError::InvalidBackend(e.to_string())), - } - #[cfg(not(feature = "surrealkv"))] - { - error!("SurrealKV backend is not enabled. 
Please enable the 'surrealkv' feature in the Cargo.toml file."); - exit(1); - } - } - "sled" => { - #[cfg(feature = "sled")] - match ferrumc_storage::backends::sled::SledBackend::initialize(Some(backend_path)) - .await - { - Ok(backend) => Ok(Box::new(backend)), - Err(e) => Err(WorldError::InvalidBackend(e.to_string())), - } - #[cfg(not(feature = "sled"))] - { - error!("Sled backend is not enabled. Please enable the 'sled' feature in the Cargo.toml file."); - exit(1); + let storage_backend: Result, WorldError> = + match backend_string.to_lowercase().as_str() { + "surrealkv" => { + #[cfg(feature = "surrealkv")] + match ferrumc_storage::backends::surrealkv::SurrealKVBackend::initialize(Some( + backend_path, + )) + .await + { + Ok(backend) => Ok(Box::new(backend)), + Err(e) => Err(WorldError::InvalidBackend(e.to_string())), + } + #[cfg(not(feature = "surrealkv"))] + { + error!("SurrealKV backend is not enabled. Please enable the 'surrealkv' feature in the Cargo.toml file."); + exit(1); + } } - } - "rocksdb" => { - #[cfg(feature = "rocksdb")] - match ferrumc_storage::backends::rocksdb::RocksDBBackend::initialize(Some(backend_path)) - .await - { - Ok(backend) => Ok(Box::new(backend)), - Err(e) => Err(WorldError::InvalidBackend(e.to_string())), + "sled" => { + #[cfg(feature = "sled")] + match ferrumc_storage::backends::sled::SledBackend::initialize(Some( + backend_path, + )) + .await + { + Ok(backend) => Ok(Box::new(backend)), + Err(e) => Err(WorldError::InvalidBackend(e.to_string())), + } + #[cfg(not(feature = "sled"))] + { + error!("Sled backend is not enabled. Please enable the 'sled' feature in the Cargo.toml file."); + exit(1); + } } - #[cfg(not(feature = "rocksdb"))] - { - error!("RocksDB backend is not enabled. 
Please enable the 'rocksdb' feature in the Cargo.toml file."); - exit(1); + "rocksdb" => { + #[cfg(feature = "rocksdb")] + match ferrumc_storage::backends::rocksdb::RocksDBBackend::initialize(Some( + backend_path, + )) + .await + { + Ok(backend) => Ok(Box::new(backend)), + Err(e) => Err(WorldError::InvalidBackend(e.to_string())), + } + #[cfg(not(feature = "rocksdb"))] + { + error!("RocksDB backend is not enabled. Please enable the 'rocksdb' feature in the Cargo.toml file."); + exit(1); + } } - } - "redb" => { - #[cfg(feature = "redb")] - match ferrumc_storage::backends::redb::RedbBackend::initialize(Some(backend_path)) - .await - { - Ok(backend) => Ok(Box::new(backend)), - Err(e) => Err(WorldError::InvalidBackend(e.to_string())), + "redb" => { + #[cfg(feature = "redb")] + match ferrumc_storage::backends::redb::RedbBackend::initialize(Some( + backend_path, + )) + .await + { + Ok(backend) => Ok(Box::new(backend)), + Err(e) => Err(WorldError::InvalidBackend(e.to_string())), + } + #[cfg(not(feature = "redb"))] + { + error!("Redb backend is not enabled. Please enable the 'redb' feature in the Cargo.toml file."); + exit(1); + } } - #[cfg(not(feature = "redb"))] - { - error!("Redb backend is not enabled. 
Please enable the 'redb' feature in the Cargo.toml file."); + _ => { + error!( + "Invalid storage backend: {}", + get_global_config().database.backend + ); exit(1); } - } - _ => { - error!( - "Invalid storage backend: {}", - get_global_config().database.backend - ); - exit(1); - } - }; + }; let storage_backend = match storage_backend { - Ok(backend) => { - backend - } + Ok(backend) => backend, Err(e) => { error!("Could not initialize storage backend: {}", e); exit(1); @@ -169,7 +171,7 @@ impl World { }; let compressor_string = get_global_config().database.compression.trim(); - + info!("Using {} compression algorithm", compressor_string); let compression_algo = match compressor_string.to_lowercase().as_str() { diff --git a/src/tests/src/ecs/mod.rs b/src/tests/src/ecs/mod.rs index f5985695..7c2e0a6b 100644 --- a/src/tests/src/ecs/mod.rs +++ b/src/tests/src/ecs/mod.rs @@ -8,16 +8,17 @@ async fn test_1mil_entities() { tokio::time::sleep(std::time::Duration::from_secs(5)).await; - let start = std::time::Instant::now(); - for _ in 0 ..1_000_000 { - world.builder() - .with(Position::from((0.0, 0.0, 0.0))).unwrap() - .with(20f32).unwrap() + for _ in 0..1_000_000 { + world + .builder() + .with(Position::from((0.0, 0.0, 0.0))) + .unwrap() + .with(20f32) + .unwrap() .build(); } println!("Time to create 1mil entities: {:?}", start.elapsed()); - tokio::time::sleep(std::time::Duration::from_secs(15)).await; } diff --git a/src/tests/src/lib.rs b/src/tests/src/lib.rs index 92868441..b602b5e5 100644 --- a/src/tests/src/lib.rs +++ b/src/tests/src/lib.rs @@ -1,5 +1,5 @@ #![cfg(test)] +mod ecs; mod nbt; mod net; -mod ecs; \ No newline at end of file diff --git a/src/tests/src/net/codec/bothway/mod.rs b/src/tests/src/net/codec/bothway/mod.rs index dca61fe4..403d5aa0 100644 --- a/src/tests/src/net/codec/bothway/mod.rs +++ b/src/tests/src/net/codec/bothway/mod.rs @@ -9,18 +9,19 @@ fn hashmaps() { "key3".to_string() => "value3".to_string(), "key4".to_string() => "value4".to_string(), }; - 
+ let encoded = { let mut buffer = Vec::new(); map.encode(&mut buffer, &NetEncodeOpts::None).unwrap(); - + buffer }; let decoded = { let mut buffer = encoded.as_slice(); - std::collections::HashMap::::decode(&mut buffer, &NetDecodeOpts::None).unwrap() + std::collections::HashMap::::decode(&mut buffer, &NetDecodeOpts::None) + .unwrap() }; - + assert_eq!(map, decoded); -} \ No newline at end of file +} diff --git a/src/tests/src/net/codec/de/mod.rs b/src/tests/src/net/codec/de/mod.rs index b5c78134..5a8e5042 100644 --- a/src/tests/src/net/codec/de/mod.rs +++ b/src/tests/src/net/codec/de/mod.rs @@ -11,12 +11,11 @@ struct SomeExampleEncStruct { #[test] fn test_decode() { let file = include_bytes!("../../../../../../.etc/tests/enc_test_encode"); - + let mut reader = std::io::Cursor::new(file); - let example = SomeExampleEncStruct::decode( - &mut reader, - &ferrumc_net_codec::decode::NetDecodeOpts::None - ).unwrap(); - + let example = + SomeExampleEncStruct::decode(&mut reader, &ferrumc_net_codec::decode::NetDecodeOpts::None) + .unwrap(); + println!("{:?}", example); -} \ No newline at end of file +} diff --git a/src/tests/src/net/codec/enc/enums.rs b/src/tests/src/net/codec/enc/enums.rs index 9d87cb9e..1a8a9831 100644 --- a/src/tests/src/net/codec/enc/enums.rs +++ b/src/tests/src/net/codec/enc/enums.rs @@ -4,16 +4,19 @@ use ferrumc_net_codec::encode::{NetEncode, NetEncodeOpts}; #[derive(NetEncode)] enum TestPacket { - Ping { - timestamp: i64 - } + Ping { timestamp: i64 }, } #[test] fn main() { - let packet = TestPacket::Ping { timestamp: 1234567890 }; + let packet = TestPacket::Ping { + timestamp: 1234567890, + }; let mut buffer = Vec::new(); packet.encode(&mut buffer, &NetEncodeOpts::None).unwrap(); - - assert_eq!(1234567890, i64::decode(&mut buffer.as_slice(), &NetDecodeOpts::None).unwrap()); -} \ No newline at end of file + + assert_eq!( + 1234567890, + i64::decode(&mut buffer.as_slice(), &NetDecodeOpts::None).unwrap() + ); +} diff --git 
a/src/tests/src/net/codec/enc/mod.rs b/src/tests/src/net/codec/enc/mod.rs index 348fa236..e61b081f 100644 --- a/src/tests/src/net/codec/enc/mod.rs +++ b/src/tests/src/net/codec/enc/mod.rs @@ -29,7 +29,7 @@ fn test_encode() { .encode(&mut writer, &ferrumc_net_codec::encode::NetEncodeOpts::None) .unwrap(); } - + #[allow(unreachable_code)] fn _test_compression() -> ! { let example = SomeExampleEncStructWithPacketId { @@ -47,4 +47,4 @@ fn _test_compression() -> ! { ) .unwrap(); } -} \ No newline at end of file +} diff --git a/src/tests/src/net/codec/mod.rs b/src/tests/src/net/codec/mod.rs index d4043a59..b8110ef3 100644 --- a/src/tests/src/net/codec/mod.rs +++ b/src/tests/src/net/codec/mod.rs @@ -1,3 +1,3 @@ -mod enc; +mod bothway; mod de; -mod bothway; \ No newline at end of file +mod enc; diff --git a/src/tests/src/net/mod.rs b/src/tests/src/net/mod.rs index 2fee9a89..9ebc9bb3 100644 --- a/src/tests/src/net/mod.rs +++ b/src/tests/src/net/mod.rs @@ -1 +1 @@ -mod codec; \ No newline at end of file +mod codec;